def get_header_image(self, ticket):
    # Resolve the URL of a large-scale header image for the ticket view.
    # Precedence: Event lead media -> folder image titled
    # "tickets-header" -> site-wide image titled "webwinkel-header" ->
    # empty string.
    if ticket:
        folder = self.context
        if folder.portal_type in ["Folder", "Event"]:
            if folder.portal_type == "Event":
                # Event: use the event's lead media image, if any
                uuid = folder.UID()
                brain = uuidToCatalogBrain(uuid)
                if brain:
                    leadmedia = getattr(brain, 'leadMedia', None)
                    if leadmedia:
                        image = uuidToCatalogBrain(leadmedia)
                        # hasattr also guards against a None brain
                        if hasattr(image, 'getURL'):
                            url = image.getURL()
                            scale_url = "%s/%s" %(url, "@@images/image/large")
                            return scale_url
            else:
                # Folder: look for a contained Image named tickets-header
                contents = folder.getFolderContents(
                    {"portal_type": "Image", "Title": "tickets-header"})
                if len(contents) > 0:
                    image = contents[0]
                    url = image.getURL()
                    scale_url = "%s/%s" %(url, "@@images/image/large")
                    return scale_url
        else:
            # Any other context: fall back to the global webshop header
            brains = self.context.portal_catalog(
                Title="webwinkel-header", portal_type="Image")
            if len(brains) > 0:
                brain = brains[0]
                if brain.portal_type == "Image":
                    url = brain.getURL()
                    scale_url = "%s/%s" %(url, "@@images/image/large")
                    return scale_url
    # no ticket or no suitable image found
    return ""
def validateAction(self, data):
    """Validate form data and derive the experiment resolution.

    Raises ActionExecutionError when no environmental dataset was
    selected; otherwise stores the chosen resolution on ``data``.
    """
    # ActionExecutionError ... form wide error
    # WidgetActionExecutionError ... widget specific
    # TODO: validate all sort of extra info- new object does not exist yet
    # data contains already field values
    datasets = data.get('environmental_datasets', {}).keys()
    if not datasets:
        # FIXME: Make this a widget error, currently shown as form wide
        # error
        raise ActionExecutionError(
            Invalid('No environmental dataset selected.'))
    # Determine highest resolution
    # FIXME: this is slow and needs improvements
    # and accessing _terms is not ideal
    res_vocab = getUtility(
        IVocabularyFactory, 'resolution_source')(self.context)
    term_positions = [
        res_vocab._terms.index(
            res_vocab.getTerm(uuidToCatalogBrain(ds_uuid).BCCResolution))
        for ds_uuid in datasets]
    if data.get('scale_down', False):
        # scaling down: pick the highest resolution (lowest term index)
        chosen = min(term_positions)
    else:
        # otherwise pick the lowest resolution (highest term index)
        chosen = max(term_positions)
    data['resolution'] = res_vocab._terms[chosen].value
def validateAction(self, data):
    """Check submitted form values and record the target resolution."""
    # ActionExecutionError ... form wide error
    # WidgetActionExecutionError ... widget specific
    # TODO: validate all sort of extra info- new object does not exist yet
    # data contains already field values
    datasets = data.get('environmental_datasets', {}).keys()
    if not datasets:
        # FIXME: Make this a widget error, currently shown as form wide
        # error
        raise ActionExecutionError(
            Invalid('No environmental dataset selected.'))
    # Determine highest resolution
    # FIXME: this is slow and needs improvements
    # and accessing _terms is not ideal
    res_vocab = getUtility(IVocabularyFactory,
                           'resolution_source')(self.context)
    scale_down = data.get('scale_down', False)
    # Track the best term index seen so far; "best" means smallest when
    # scaling down (highest resolution), largest otherwise.
    best = 99 if scale_down else -1
    for ds_uuid in datasets:
        dsbrain = uuidToCatalogBrain(ds_uuid)
        pos = res_vocab._terms.index(
            res_vocab.getTerm(dsbrain.BCCResolution))
        if (scale_down and pos < best) or (not scale_down and pos > best):
            best = pos
    data['resolution'] = res_vocab._terms[best].value
def generate_slide_item_from_brain(self, brain):
    # Build the dict describing one slider entry from a catalog brain.
    # The image chosen depends on the content type: Links and other
    # types prefer their lead media, Images use themselves.
    item = {}
    path = brain.getPath()
    url = None
    title = brain.Title
    description = brain.Description
    slide_id = brain.getId
    absoluteurl = brain.getURL()
    item_portal_type = brain.portal_type
    link_image = None
    img_brain = None
    if item_portal_type == "Link":
        # Link: slide points at the remote URL; the image comes from
        # lead media (or the first suitable content item as fallback)
        url = brain.getRemoteUrl
        if getattr(brain, 'leadMedia', None):
            img = uuidToCatalogBrain(brain.leadMedia)
            # NOTE(review): no None check on ``img`` here, unlike the
            # branch below — confirm lead media is always resolvable.
            link_image = "%s/@@images/image/%s" % (img.getURL(), "large")
            img_brain = img
        else:
            link_image, img_brain = self.get_lead_from_contents(brain)
        # add case for object
    elif item_portal_type != "Image":
        # All content types except Image and Link
        if getattr(brain, 'leadMedia', None):
            img = uuidToCatalogBrain(brain.leadMedia)
            if img:
                url = "%s/@@images/image/%s" % (img.getURL(), "large")
                img_brain = img
        else:
            url, img_brain = self.get_lead_from_contents(brain)
    else:
        # Image: the brain itself is the slide image
        url = "%s/@@images/image/%s" % (brain.getURL(), "large")
        img_brain = brain
    orientation = self.find_orientation(brain)
    item = {
        "type": item_portal_type,
        "url": url,
        "path": path,
        "title": title,
        "description": description,
        "id": slide_id,
        "link_image": link_image,
        "absoluteurl": absoluteurl,
        "orientation": orientation,
        "img_brain": img_brain
    }
    return item
def get_related_brains(self):
    """Return catalog brains for the UUIDs referenced by this object.

    Resolves every UUID stored in ``self.data['uuids']``; UUIDs that
    cannot be resolved are silently skipped.
    """
    uuids = self.data.get('uuids', None)
    brains = []
    if uuids:
        for uuid in uuids:
            # resolve once instead of twice (the original looked each
            # UUID up for the truthiness check and again for the append)
            brain = uuidToCatalogBrain(uuid)
            if brain:
                brains.append(brain)
    return brains
def getLeadImageLink(self):
    """Return the large-image URL of this context's lead media, or ''.

    The original wrapped the body in ``try: ... except: raise``, which
    is a no-op handler; it has been removed — exceptions still
    propagate exactly as before.
    """
    context_uid = self.context.UID()
    brain = uuidToCatalogBrain(context_uid)
    if brain:
        img = uuidToCatalogBrain(getattr(brain, 'leadMedia', None))
        if img:
            url = "%s/@@images/image/%s" % (img.getURL(), "large")
            return url
    # no brain or no lead media image
    return ""
def update(self):
    """Populate view attributes from the meeting-report context.

    Resolves attended/absent club UUIDs to catalog brains, joins the
    notification/missing-data lists into display strings and renders
    the description through portal_transforms.
    """
    context = self.context
    # NOTE(review): clubs_present is read but never used below —
    # confirm whether a template relies on it before removing.
    clubs_present = getattr(context, 'clubs_present', [])
    clubs_attended = getattr(context, 'clubs_attended', [])
    clubs_absent = getattr(context, 'clubs_absent', [])
    club_officers_emailed = getattr(context, 'club_officers_emailed', [])
    missing_member_data = getattr(context, 'missing_member_data', [])
    # Build the brain lists with plain comprehensions instead of the
    # original side-effect-only ``[list.append(...) for ...]`` idiom.
    clubs_attended_brains = [
        uuidToCatalogBrain(p_uuid) for p_uuid in clubs_attended or []]
    clubs_absent_brains = [
        uuidToCatalogBrain(a_uuid) for a_uuid in clubs_absent or []]
    self.clubs_attended_brains = clubs_attended_brains
    self.clubs_absent_brains = clubs_absent_brains
    if club_officers_emailed:
        self.club_officers_emailed_string = ', '.join(
            club_officers_emailed)
    else:
        self.club_officers_emailed_string = ''
    if missing_member_data:
        self.missing_member_data_string = ', '.join(missing_member_data)
    else:
        self.missing_member_data_string = ''
    description_text = getattr(context, 'description', u'')
    if description_text:
        portal_transforms = api.portal.get_tool(name='portal_transforms')
        # NOTE(review): 'text/-x-web-intelligent' looks like a typo for
        # the standard 'text/x-web-intelligent' mimetype — confirm
        # before changing; left as-is to preserve behavior.
        description_data = portal_transforms.convertTo(
            'text/html', description_text,
            mimetype='text/-x-web-intelligent')
        self.description_html = description_data.getData()
    else:
        self.description_html = description_text
def collection(self):
    """Return the target-collection brain, memoised on the instance."""
    cached = self._collection
    if cached:
        return cached
    # cache miss (or previously unresolved): look the brain up again
    resolved = uuidToCatalogBrain(self.data.target_collection)
    self._collection = resolved
    return resolved
def renderCell(self, item):
    """Render the cell value(s) as pretty-link anchors, or '-'."""
    raw = self.getValue(item)
    if not raw:
        return '-'
    values = raw if isinstance(raw, list) else [raw]
    rendered = []
    for entry in values:
        # entries prefixed with 'l:' are skipped entirely
        if entry.startswith('l:'):
            continue
        brain = uuidToCatalogBrain(entry)
        if not brain:
            rendered.append('-')
            continue
        rendered.append(
            u"<a href='%s' target='_blank' class='pretty_link link-tooltip'>"
            u"<span class='pretty_link_icons'>%s</span>"
            u"<span class='pretty_link_content'>%s</span></a>"
            % (brain.getURL(), self._icons(brain),
               safe_unicode(brain.get_full_title)))
    if not rendered:
        return '-'
    if len(rendered) == 1:
        return rendered[0]
    return '<ul class="%s"><li>%s</li></ul>' % (
        self.ul_class, '</li>\n<li>'.join(rendered))
def validateAction(self, data):
    """ Get resolution from SDM and use it to find future datasets

    TODO: if required layers are not available in future datasets,
          use current layers from SDM
    """
    # ActionExecutionError ... form wide error
    # WidgetActionExecutionError ... widget specific
    # TODO: match result layers with sdm layers and get missing layers
    #       from SDM? -> only environmental? or missing climate layers
    #       as well? do matching here? or in job submit?
    datasets = data.get('future_climate_datasets', [])
    if not datasets:
        # FIXME: Make this a widget error, currently shown as form wide error
        raise ActionExecutionError(
            Invalid('No future climate dataset selected.'))
    models = data.get('species_distribution_models', {})
    if not tuple(chain.from_iterable(x for x in models.values())):
        raise ActionExecutionError(Invalid('No source dataset selected.'))
    # Determine lowest resolution
    # FIXME: this is slow and needs improvements
    # and accessing _terms is not ideal
    res_vocab = getUtility(
        IVocabularyFactory, 'resolution_source')(self.context)
    # the lowest resolution corresponds to the largest term index
    lowest = max(
        res_vocab._terms.index(
            res_vocab.getTerm(uuidToCatalogBrain(ds).BCCResolution))
        for ds in datasets)
    data['resolution'] = res_vocab._terms[lowest].value
def validateAction(self, data):
    """Validate SDM form input and derive the experiment resolution."""
    # ActionExecutionError ... form wide error
    # WidgetActionExecutionError ... widget specific
    # TODO: validate all sort of extra info- new object does not exist yet
    # data contains already field values
    datasets = data.get('environmental_datasets', {}).keys()
    if not datasets:
        # FIXME: Make this a widget error, currently shown as form wide error
        raise ActionExecutionError(
            Invalid('No environmental dataset selected.'))
    # TODO: we should make sure only user picks only one option, otherwise
    # pseudo absence will be preferred (maybe we can do both?, select
    # absence points, and fill up mith pseudo absences?)
    # we need an absence dataset or a a number of pseudo absence points
    pseudo = data.get('species_pseudo_absence_points')
    if not pseudo:
        if not data.get('species_absence_dataset'):
            raise ActionExecutionError(
                RequiredMissing('No absence points selected.'))
    else:
        num_points = data.get('species_number_pseudo_absence_points')
        if not num_points:
            raise ActionExecutionError(
                RequiredMissing('No absence points selected'))
        if num_points <= 0:
            raise ActionExecutionError(
                Invalid('Number of absence points must be greater than 0.'))
    # Determine lowest resolution
    # FIXME: this is slow and needs improvements
    # and accessing _terms is not ideal
    res_vocab = getUtility(IVocabularyFactory,
                           'resolution_source')(self.context)
    lowest = -1
    for ds_uuid in datasets:
        brain = uuidToCatalogBrain(ds_uuid)
        pos = res_vocab._terms.index(res_vocab.getTerm(brain.BCCResolution))
        lowest = max(lowest, pos)
    data['resolution'] = res_vocab._terms[lowest].value
def rat(self):
    """Return the Raster Attribute Table of a dataset layer as JSON.

    Request parameters: ``uuid`` (the dataset) and ``layer`` (layer
    id within the dataset's metadata). Records an API error and
    raises NotFound / BadRequest on failure.
    """
    uuid = self.request.form.get('uuid')
    layer = self.request.form.get('layer')
    brain = None
    try:
        brain = uuidToCatalogBrain(uuid)
    except Exception as e:
        LOG.error('Caught exception %s', e)
    if not brain:
        self.record_error('Not Found', 404,
                          'dataset not found', {'parameter': 'uuid'})
        raise NotFound(self, 'metadata', self.request)
    md = IBCCVLMetadata(brain.getObject())
    # BUG FIX: the original used ``and``, which only rejected a missing
    # layer parameter and let an unknown layer fall through to the JSON
    # decode below; an invalid layer must be rejected as well.
    if not layer or layer not in md.get('layers', {}):
        self.record_error('Bad Request', 400,
                          'Missing parameter layer', {'parameter': 'layer'})
        raise BadRequest('Missing parameter layer')
    try:
        rat = md.get('layers', {}).get(layer, {}).get('rat')
        rat = json.loads(unicode(rat))
        return rat
    except Exception as e:
        LOG.warning(
            "Couldn't decode Raster Attribute Table from metadata. %s: %s",
            self.context, repr(e))
        raise NotFound(self, 'rat', self.request)
def learningString(self):
    # text contained in SearcheableText
    catalog = getToolByName(self.context, 'portal_catalog')
    rid = uuidToCatalogBrain(self.context.UID()).getRID()
    index_data = catalog.getIndexDataForRID(rid)
    words = index_data['SearchableText']
    return ' '.join(words)
def get_next_obj(self, start, collection_id):
    # Return the collection item that follows the current context
    # object, paging through collection batches of ``pagesize``.
    pagesize = 33
    if "/" not in start:
        object_id = self.context.getId()
        collection_object = uuidToCatalogBrain(collection_id)
        if collection_object:
            if collection_object.portal_type == "Collection":
                results = self.get_batch(collection_object, start, pagesize)
                object_idx = self.get_object_idx(results, object_id)
                if object_idx < results.items_on_page-1:
                    # next item is on the current batch page
                    return results[object_idx+1]
                else:
                    if results.has_next:
                        # first element of the following page
                        # (nextpage is 1-based, start is an item offset)
                        page = results.nextpage
                        page -= 1
                        start = int(start)
                        start = (page * pagesize)
                        b_results = self.get_batch(
                            collection_object, start, pagesize)
                        first_element = b_results[0]
                        return first_element
                    else:
                        # last page reached: wrap around to the first page
                        start = 0
                        b_results = self.get_batch(
                            collection_object, start, pagesize)
                        first_element = b_results[0]
                        return first_element
def items(self):
    """Yield one info dict per selected experiment.

    Each dict carries experiment metadata plus a 'datasets' list of the
    models found inside the experiment, flagged with their selection
    state.
    """
    if not self.value:
        return
    for experiment_uuid, model_uuids in self.value.items():
        expbrain = uuidToCatalogBrain(experiment_uuid)
        exp = expbrain.getObject()
        expmd = IBCCVLMetadata(exp)
        # TODO: what else wolud I need from an experiment?
        entry = {
            'title': expbrain.Title,
            'uuid': expbrain.UID,
            'resolution': expmd.get('resolution'),
            'brain': expbrain,
        }
        # now search all models within and add infos
        portal_catalog = getToolByName(self.context, 'portal_catalog')
        model_brains = portal_catalog.searchResults(
            path=expbrain.getPath(), BCCDataGenre=self.genre)
        # TODO: maybe as generator?
        entry['datasets'] = [
            {'uuid': b.UID,
             'title': b.Title,
             'obj': b.getObject(),
             'md': IBCCVLMetadata(b.getObject()),
             'selected': b.UID in self.value[experiment_uuid]}
            for b in model_brains]
        yield entry
def subitems(self, dsbrain):
    """Yield selectable sub-entries of a dataset brain.

    For ordinary datasets these are the dataset's layers; species
    collections additionally expose their 'parts' sub-datasets.
    """
    md = IBCCVLMetadata(dsbrain.getObject())
    layer_vocab = self.dstools.layer_vocab
    selected = self.value.get(dsbrain.UID) or ()
    if md.get('genre') != 'DataGenreSpeciesCollection':
        for layer_id in sorted(md.get('layers', ())):
            if layer_id in layer_vocab:
                label = layer_vocab.getTerm(layer_id).title
            else:
                label = layer_id
            yield {
                'id': layer_id,
                'title': label,
                'selected': layer_id in selected,
            }
    for part_uuid in sorted(getattr(dsbrain.getObject(), 'parts', ())):
        part = uuidToCatalogBrain(part_uuid)
        # TODO: should we just ignore it?
        if not part:
            continue
        yield {
            'id': part_uuid,
            'title': part.Title,
            'selected': part_uuid in selected
        }
def query(self, form):
    """ Get value from form and return a catalog dict query """
    query = {}
    index = self.data.get('index', '')
    index = index.encode('utf-8', 'replace')
    if not index:
        return query
    # try the submitted value first: a UUID resolved to its path
    value = form.get(self.data.getId(), '')
    if value:
        brain = uuidToCatalogBrain(value)
        value = brain.getPath() if brain else brain
    if not value:
        # fall back to the configured root below the portal
        portal_url = getToolByName(self.context, 'portal_url')
        root = self.data.get('root', '')
        if root.startswith('/'):
            root = root[1:]
        value = '/'.join([portal_url.getPortalPath(), root])
    if not value:
        return query
    depth = safeToInt(self.data.get('depth', -1))
    query[index] = {"query": value, 'level': depth}
    return query
def get_prev_obj(self, start, collection_id):
    # Return the collection item preceding the current context object,
    # paging through collection batches of ``pagesize``.
    pagesize = 33
    if "/" not in start:
        object_id = self.context.getId()
        collection_object = uuidToCatalogBrain(collection_id)
        if collection_object:
            if collection_object.portal_type == "Collection":
                ## Get Batch of collection
                results = self.get_batch(collection_object, start, pagesize)
                ## Get prev item
                object_idx = self.get_object_idx(results, object_id)
                if object_idx > 0:
                    # previous item is on the current batch page
                    return results[object_idx-1]
                else:
                    if results.has_previous:
                        # last element of the previous page
                        # (previouspage is 1-based, start is an offset)
                        page = results.previouspage
                        start = int(start)
                        start = (page * pagesize) - pagesize
                        b_results = self.get_batch(
                            collection_object, start, pagesize)
                        last_element = b_results[b_results.items_on_page-1]
                        return last_element
                    else:
                        # first page reached: wrap around to the last page
                        lastpage = results.lastpage
                        start = int(start)
                        start = (lastpage * pagesize) - pagesize
                        b_results = self.get_batch(
                            collection_object, start, pagesize)
                        last_element = b_results[b_results.items_on_page-1]
                        return last_element
def items_old(self):
    # FIXME importing here to avoid circular import of IDataset
    from org.bccvl.site.api.dataset import getdsmetadata
    # Yield one visualisation entry per selected layer of each selected
    # dataset. (Python 2 code: uses dict.iteritems.)
    if self.value:
        for uuid in self.value:
            brain = uuidToCatalogBrain(uuid)
            # TODO: could use layer vocab again
            md = getdsmetadata(brain)
            layers = self.value[uuid]
            # FIXME: check if layers or layers_used here
            for layer, layeritem in md['layers'].iteritems():
                if not layer in layers:
                    continue
                mimetype = 'application/octet-stream'
                layerfile = None
                if 'filename' in layeritem:
                    # FIXME: hardcoded mimetype logic for zip files.
                    #        should draw mimetype info from layer metadata
                    #        assumes there are only geotiff in zip files
                    mimetype = 'image/geotiff'
                    layerfile = layeritem['filename']
                    # point the viz url at the specific file in the zip
                    vizurl = '{0}#{1}'.format(md['vizurl'], layerfile)
                else:
                    vizurl = md['vizurl']
                    mimetype = md['mimetype']
                yield {"brain": brain,
                       "resolution": self.dstools.resolution_vocab.getTerm(
                           brain['BCCResolution']),
                       "layer": self.dstools.layer_vocab.getTerm(layer),
                       "vizurl": vizurl,
                       'mimetype': mimetype,
                       'vizlayer': layerfile}
def validateAction(self, data):
    """Validate ensemble input datasets and derive a common resolution."""
    datasets = list(chain.from_iterable(data.get('datasets', {}).values()))
    if not datasets:
        # FIXME: Make this a widget error, currently shown as form wide
        # error
        raise ActionExecutionError(Invalid('No dataset selected.'))
    # all selected datasets are combined into one ensemble analysis
    # get resolution for ensembling
    # Determine lowest resolution
    # FIXME: An experiment should store the resolution metadata on the dataset
    # e.g. an SDM current projection needs to store resolution on tif file
    res_vocab = getUtility(
        IVocabularyFactory, 'resolution_source')(self.context)
    resolution_idx = -1
    for dsbrain in (uuidToCatalogBrain(d) for d in datasets):
        try:
            idx = res_vocab._terms.index(
                res_vocab.getTerm(dsbrain.BCCResolution))
        except Exception:
            # narrowed from a bare ``except:`` (which also swallowed
            # SystemExit/KeyboardInterrupt); fall back to the resolution
            # recorded on the result's job parameters
            # FIXME: need faster way to order resolutions
            idx = res_vocab._terms.index(res_vocab.getTerm(
                dsbrain.getObject().__parent__.job_params['resolution']))
        if idx > resolution_idx:
            resolution_idx = idx
    data['resolution'] = res_vocab._terms[resolution_idx].value
def internal_link_url(self):
    """Return the URL of the configured internal link, or None.

    Made consistent with the sibling implementation: the resolved
    brain is checked for None instead of relying on the AttributeError
    being swallowed by the except clause.
    """
    if self.data.get('internal_link'):
        try:
            brain = uuidToCatalogBrain(self.data['internal_link'][0])
            if brain:
                return brain.getURL()
        except Exception:
            # best-effort: an unresolvable link renders as no URL
            pass
def speciestraits_listing_details(expbrain):
    """Assemble the listing-detail dict for a species-traits experiment."""
    # FIXME: implement this
    exp = expbrain.getObject()
    if exp.species_traits_dataset:
        species_occ = get_title_from_uuid(
            exp.species_traits_dataset, u'(Unavailable)')
    else:
        species_occ = ''
    algorithm_uuids = chain(exp.algorithms_species or [],
                            exp.algorithms_diff or [])
    toolkits = ', '.join(get_title_from_uuid(a_uuid, u'(Unavailable)')
                         for a_uuid in algorithm_uuids if a_uuid)
    envlayers = []
    for envuuid, layers in sorted((exp.environmental_datasets or {}).items()):
        envbrain = uuidToCatalogBrain(envuuid)
        envlayers.append({
            'title': envbrain.Title if envbrain else u'Missing dataset',
            'layers': sorted(layers)
        })
    return {
        'type': 'SPECIES TRAITS',
        'functions': toolkits,
        'species_occurrence': species_occ,
        'species_absence': '',
        'environmental_layers': envlayers,
    }
def validateAction(self, data):
    """Validate selected datasets and store the lowest common resolution."""
    datasets = list(chain.from_iterable(data.get('datasets', {}).values()))
    if not datasets:
        # FIXME: Make this a widget error, currently shown as form wide
        # error
        raise ActionExecutionError(Invalid('No dataset selected.'))
    # all selected datasets are combined into one ensemble analysis
    # get resolution for ensembling
    # Determine lowest resolution
    # FIXME: An experiment should store the resolution metadata on the dataset
    # e.g. an SDM current projection needs to store resolution on tif file
    res_vocab = getUtility(IVocabularyFactory,
                           'resolution_source')(self.context)
    resolution_idx = -1
    for dsbrain in (uuidToCatalogBrain(d) for d in datasets):
        try:
            idx = res_vocab._terms.index(
                res_vocab.getTerm(dsbrain.BCCResolution))
        except Exception:
            # narrowed from a bare ``except:``; fall back to the
            # resolution recorded on the result's job parameters
            # FIXME: need faster way to order resolutions
            idx = res_vocab._terms.index(
                res_vocab.getTerm(dsbrain.getObject().__parent__.
                                  job_params['resolution']))
        if idx > resolution_idx:
            resolution_idx = idx
    data['resolution'] = res_vocab._terms[resolution_idx].value
def find_orientation(self, item):
    """Classify ``item`` as 'landscape' or 'portrait'.

    ``item`` may be an orientation code string ('L' means landscape,
    anything else portrait), an Image brain, or a brain with lead
    media. Returns '' when no usable image can be measured.
    """
    # idiom fix: isinstance instead of ``type(item) == str``
    if isinstance(item, str):
        return "landscape" if item == "L" else "portrait"
    item_class = ""
    if item.portal_type == "Image":
        image_obj = item.getObject()
        if getattr(image_obj, 'image', None):
            try:
                w, h = image_obj.image.getImageSize()
                item_class = 'landscape' if w > h else 'portrait'
            except Exception:
                # narrowed from a bare ``except:``; unmeasurable image
                return item_class
    elif item.hasMedia:
        image = uuidToCatalogBrain(item.leadMedia)
        if image:
            image_obj = image.getObject()
            if getattr(image_obj, 'image', None):
                try:
                    w, h = image_obj.image.getImageSize()
                    item_class = 'landscape' if w > h else 'portrait'
                except Exception:
                    return item_class
    return item_class
def getImageClass(self, item, has_media=False):
    """Return CSS classes for a listing entry.

    Appends an orientation class ('landscape'/'portrait') to the base
    'entry' class when an image can be measured.
    """
    item_class = "entry"
    if item.portal_type == "Image":
        image_obj = item.getObject()
        if getattr(image_obj, 'image', None):
            try:
                w, h = image_obj.image.getImageSize()
                if w > h:
                    item_class = "%s %s" % (item_class, 'landscape')
                else:
                    item_class = "%s %s" % (item_class, 'portrait')
            except Exception:
                # narrowed from a bare ``except:``; unmeasurable image
                return item_class
    elif has_media:
        image = uuidToCatalogBrain(item.leadMedia)
        # ROBUSTNESS: guard against an unresolvable lead-media UUID
        # (mirrors find_orientation); previously a None brain raised
        # AttributeError here.
        if image:
            image_obj = image.getObject()
            if getattr(image_obj, 'image', None):
                try:
                    w, h = image_obj.image.getImageSize()
                    if w > h:
                        item_class = "%s %s" % (item_class, 'landscape')
                    else:
                        item_class = "%s %s" % (item_class, 'portrait')
                except Exception:
                    return item_class
    return item_class
def items(self):
    # Generator: one entry per experiment in self.value, each carrying
    # experiment info plus its contained model datasets.
    value = self.value
    if value:
        for experiment_uuid in value:
            expbrain = uuidToCatalogBrain(experiment_uuid)
            exp = expbrain.getObject()
            # TODO: what else wolud I need from an experiment?
            entry = dict(
                title=expbrain.Title,
                uuid=expbrain.UID,
                resolution=IBCCVLMetadata(exp).get('resolution'),
                brain=expbrain,
            )
            # now search all models within and add infos
            catalog = getToolByName(self.context, 'portal_catalog')
            selected = value[experiment_uuid]
            datasets = []
            for b in catalog.searchResults(path=expbrain.getPath(),
                                           BCCDataGenre=self.genre):
                obj = b.getObject()
                datasets.append({'uuid': b.UID,
                                 'title': b.Title,
                                 'obj': obj,
                                 'md': IBCCVLMetadata(obj),
                                 'selected': b.UID in selected})
            # TODO: maybe as generator?
            entry['datasets'] = datasets
            yield entry
def item(self):
    """Return info about the (single) selected experiment and its models."""
    info = {}
    if not self.value:
        return info
    experiment_uuid = self.value.keys()[0]
    expbrain = uuidToCatalogBrain(experiment_uuid)
    exp = expbrain.getObject()
    info['title'] = expbrain.Title
    info['uuid'] = expbrain.UID
    # all layers used across the experiment's environmental datasets
    info['layers'] = set(chain(*exp.environmental_datasets.values()))
    info['resolution'] = IBCCVLMetadata(exp)['resolution']
    # now search all models within and add infos
    catalog = getToolByName(self.context, 'portal_catalog')
    selected = self.value[experiment_uuid]
    # TODO: maybe as generator?
    info['models'] = [{'item': brain,
                       'uuid': brain.UID,
                       'title': brain.Title,
                       'selected': brain.UID in selected}
                      for brain in catalog.searchResults(
                          path=expbrain.getPath(),
                          BCCDataGenre=self.genre)]
    return info
def find_url(ob, url):
    """Return truthy when ``url`` resolves to something reachable."""
    for prefix in _known_bad:
        if url.startswith(prefix):
            return False
    for fragment in _safe:
        if fragment in url:
            return True
    if 'resolveuid/' in url:
        # check if object...
        uid = url.split('resolveuid/')[-1].split('/')[0]
        return uuidToCatalogBrain(uid) is not None
    if (('https://' in url or 'http://' in url)
            and 'http://nohost' not in url):
        # external link: probe it over the network
        try:
            print('checking ' + url)
            resp = requests.get(url, stream=True, timeout=5)
        except Exception:
            resp = BadResponse()
        return resp.status_code == 200
    # internal path: strip host, query, anchor and image-scale suffix,
    # then try to traverse to it
    if 'http://nohost' in url:
        url = url.replace('http://nohost', '')
    path = url.replace('%20', ' ').split('?')[0].split('#')[0]
    path = path.split('/@@images')[0]
    try:
        return ob.restrictedTraverse(str(path), None) is not None
    except Exception:
        return False
def learningString(self):
    # text contained in SearcheableText
    portal_catalog = getToolByName(self.context, 'portal_catalog')
    context_brain = uuidToCatalogBrain(self.context.UID())
    index_data = portal_catalog.getIndexDataForRID(context_brain.getRID())
    return ' '.join(index_data['SearchableText'])
def count(self, brains, sequence=None):
    """ Intersect results """
    # Returns a mapping of facet value -> number of matching results,
    # plus '' and 'all' keys holding the total. Handles both Solr
    # responses (facet counts) and plain ZCatalog result sets.
    res = {}
    # by checking for facet_counts we assume this is a SolrResponse
    # from collective.solr
    if hasattr(brains, 'facet_counts'):
        facet_fields = brains.facet_counts.get('facet_fields')
        if facet_fields:
            index_id = self.data.get('index')
            facet_field = facet_fields.get(index_id, {})
            for value, num in facet_field.items():
                # keys are normalised to unicode
                if isinstance(value, unicode):
                    res[value] = num
                else:
                    unicode_value = value.decode('utf-8')
                    res[unicode_value] = num
        else:
            # no facet counts were returned. we exit anyway because
            # zcatalog methods throw an error on solr responses
            return res
        res[""] = res['all'] = len(brains)
        return res
    else:
        # this is handled by the zcatalog. see below
        pass
    if not sequence:
        sequence = [key for key, value in self.vocabulary()]
    if not sequence:
        return res
    index_id = self.data.get('index')
    if not index_id:
        return res
    ctool = getToolByName(self.context, 'portal_catalog')
    index = ctool._catalog.getIndex(index_id)
    ctool = queryUtility(IFacetedCatalog)
    if not ctool:
        return res
    # intersect the result set with each facet value's result set
    brains = IISet(brain.getRID() for brain in brains)
    res[""] = res['all'] = len(brains)
    for value in sequence:
        item = uuidToCatalogBrain(value)
        if not item:
            # unresolvable value: count everything
            res[value] = len(brains)
            continue
        rset = ctool.apply_index(self.context, index, item.getPath())[0]
        rset = IISet(rset)
        rset = weightedIntersection(brains, rset)[1]
        if isinstance(value, unicode):
            res[value] = len(rset)
        else:
            unicode_value = value.decode('utf-8')
            res[unicode_value] = len(rset)
    return res
def __call__(self, keep_selection_order=False):
    # Folder-contents action entry point: resolve the destination
    # folder from the request, then apply ``self.action`` to every
    # selected object the current user is allowed to modify.
    self.protect()
    self.errors = []
    context = aq_inner(self.context)
    selection = self.get_selection()
    parts = str(self.request.form.get('folder', '').lstrip('/')).split('/')
    # NOTE(review): str.split always returns at least one element, so
    # this condition is always true — confirm the intended guard.
    if parts:
        parent = self.site.unrestrictedTraverse('/'.join(parts[:-1]))
        self.dest = parent.restrictedTraverse(parts[-1])
    self.catalog = getToolByName(context, 'portal_catalog')
    self.mtool = getToolByName(self.context, 'portal_membership')
    brains = []
    if keep_selection_order:
        # preserve the order in which the user selected the items
        brains = [uuidToCatalogBrain(uid) for uid in selection]
    else:
        brains = self.catalog(UID=selection, show_inactive=True)
    for brain in brains:
        if not brain:
            continue
        # remove everyone so we know if we missed any
        selection.remove(brain.UID)
        obj = brain.getObject()
        if (
            self.required_obj_permission and
            not self.mtool.checkPermission(
                self.required_obj_permission, obj
            )
        ):
            self.errors.append(_(
                'Permission denied for "${title}"',
                mapping={'title': self.objectTitle(obj)}
            ))
            continue
        obj_id = brain.getId
        from_obj = parent
        if parts:
            from_obj = self.dest
        try:
            traversed_obj = from_obj.restrictedTraverse(obj_id)
        except KeyError:
            traversed_obj = None
        if traversed_obj is not None:
            # refuse to act on proxy placeholders
            is_content_proxy = getattr(
                traversed_obj, 'is_content_proxy', False)
            is_folder_proxy = getattr(
                traversed_obj, 'is_folder_proxy', False)
            if is_content_proxy or is_folder_proxy:
                self.errors.append(_(
                    'Invalid operation for "${title}"',
                    mapping={'title': self.objectTitle(traversed_obj)}
                ))
                continue
        self.action(obj)
    self.finish()
    # ``selection`` now holds only the UIDs that were never processed
    return self.message(selection)
def get_title_from_uuid(uuid, default=None):
    """Return the Title of the catalog brain for ``uuid``.

    Falls back to ``default`` when the UUID cannot be resolved or the
    lookup raises (best-effort helper; errors are deliberately
    swallowed).
    """
    try:
        brain = uuidToCatalogBrain(uuid)
        if brain:
            return brain.Title
    except Exception:
        # intentionally best-effort; the unused ``as e`` binding from
        # the original was dropped
        pass
    return default
def items(self):
    # Yield a title/uuid pair for each stored dataset uuid.
    for uuid in (self.value or ()):
        brain = uuidToCatalogBrain(uuid)
        # md = IBCCVLMetadata(brain.getObject())
        yield dict(title=brain.Title, uuid=brain.UID)
def validate_id(self, item_id):
    """Check if item_id is a UUID or a the id of a StatusUpdate"""
    # numeric ids refer to StatusUpdates in the microblog container
    if item_id.isdigit():
        container = PLONESOCIAL.microblog
        if container and int(item_id) in container._status_mapping:
            return True
    # otherwise treat it as a content UUID
    brain = uuidToCatalogBrain(item_id)
    if brain is not None:
        return True
def get_collection_from_catalog(self, collection_id):
    """Return the catalog brain for ``collection_id`` if it resolves
    to a Collection, else None.

    (Removed the unused ``uuid`` local and flattened the nested ifs.)
    """
    brain = uuidToCatalogBrain(collection_id)
    if brain and brain.portal_type == "Collection":
        return brain
    return None
def get_func_obj(self, uuid=None, funcid=None):
    # Return a (cached) brain for an algorithm/function, caching by the
    # brain's short id.
    # NOTE(review): the cache logic looks suspicious — when ``funcid``
    # is already cached the brain is still re-fetched via ``uuid``, and
    # when ``funcid`` is unknown while ``uuid`` is None the final
    # lookup raises KeyError. Confirm the intended behavior before
    # changing.
    if (funcid is None or funcid in self.func_obj_cache) and uuid is not None:
        brain = uuidToCatalogBrain(uuid)
        if brain is None:
            return None
        self.func_obj_cache[brain.getId] = brain
        return self.func_obj_cache[brain.getId]
    return self.func_obj_cache[funcid]
def make_item(item, next_prev=True):
    """Make an item for REST API as expected by the frontend client.

    When ``next_prev`` is true, also resolves the previous workspace
    (from the parent) and the next workspaces (from the children),
    recursing one level with ``next_prev=False``.
    """
    ws = getattr(item, WORKSPACE_ATTRIBUTE, None) or None  # at least not Missing.Value  # noqa
    if next_prev:
        ob = item.getObject()
        parent = aq_parent(ob)
        data_previous = None
        if (parent.portal_type in NODE_TYPES and
                getattr(aq_base(parent), WORKSPACE_ATTRIBUTE, ws) != ws):
            # If the parent's ws != current ws it's OK to not reach this branch because:
            # 1) not a Contribution or Case.
            # 2) or hasn't set it's ``workspace`` attribute and not a different ws for sure.
            _prev = uuidToCatalogBrain(IUUID(parent)) if parent else None
            data_previous = make_item(_prev, next_prev=False) if _prev else None
        data_next = []
        for child in ob.contentValues():
            if getattr(aq_base(child), WORKSPACE_ATTRIBUTE, ws) != ws:
                # If child hasn't set it's ws attribute its not a different WS for sure.
                _next = uuidToCatalogBrain(IUUID(child))
                data_next.append(make_item(_next, next_prev=False))
    # wrap the brain for uniform accessor methods
    item = IContentListingObject(item)
    ret = {
        "@id": item.getURL(),
        "@type": item.PortalType(),
        "UID": item.uuid(),
        "title": item.Title(),
        "review_state": item.review_state(),
        "workspace": ws,
        "is_workspace_root": item.workspace_root,
        "created": item.CreationDate(),
        "modified": item.ModificationDate()
    }
    if next_prev:
        ret["previous_workspace"] = data_previous
        ret["next_workspaces"] = data_next
    return ret
def internal_link_url(self):
    """Return the URL of the configured internal link, if resolvable."""
    link_uuids = self.get_data('internal_link')
    if not link_uuids:
        return
    try:
        target = uuidToCatalogBrain(link_uuids[0])
        return target.getURL() if target else None
    except Exception:
        # best-effort: broken links simply yield no URL
        pass
def items(self):
    # One dict per stored uuid, resolved through the catalog.
    if not self.value:
        return
    for selected_uuid in self.value:
        brain = uuidToCatalogBrain(selected_uuid)
        # md = IBCCVLMetadata(brain.getObject())
        yield {
            'uuid': brain.UID,
            'title': brain.Title,
        }
def projection_listing_details(expbrain):
    # Build the listing-detail dict for a projection experiment:
    # input SDM experiments, future climate datasets, toolkits and
    # environmental layers.
    # TODO: duplicate code here... see org.bccvl.site.browser.widget.py
    # TODO: generated list here not very useful,.... all layers over
    #       all sdms are concatenated
    # TODO: whata about future datasets?
    details = {}
    exp = expbrain.getObject()
    inputexps = set()
    futureenvs = set()
    for env_uuid in exp.future_climate_datasets:
        futureenvs.add(get_title_from_uuid(env_uuid, u'(Unavailable)'))
    # NOTE(review): toolkits/species_occ/envlayers keep only the values
    # from the LAST iterated SDM — confirm this is intended.
    for sdmuuid in exp.species_distribution_models:
        inputexps.add(get_title_from_uuid(sdmuuid, u'(Unavailable)'))
        sdmexp = uuidToObject(sdmuuid)
        if sdmexp is not None:
            # TODO: absence data
            envlayers = []
            # TODO: list all the subset layers??
            if sdmexp.portal_type == 'org.bccvl.content.mmexperiment':
                environmental_datasets = sdmexp.datasubsets[0].get(
                    'environmental_datasets')
            else:
                environmental_datasets = sdmexp.environmental_datasets
            for envuuid, layers in sorted(environmental_datasets.items()):
                envbrain = uuidToCatalogBrain(envuuid)
                envtitle = envbrain.Title if envbrain else u'Missing dataset'
                envlayers.append({'title': envtitle,
                                  'layers': sorted(layers)})
            # TODO: job_params has only id of function not uuid ... not
            #       sure how to get to the title
            toolkits = ', '.join(
                uuidToObject(sdmmodel).__parent__.job_params['function']
                for sdmmodel in exp.species_distribution_models[sdmuuid])
            if sdmexp.portal_type in ('org.bccvl.content.sdmexperiment',
                                      'org.bccvl.content.mmexperiment'):
                species_occ = get_title_from_uuid(
                    sdmexp.species_occurrence_dataset,
                    u'(Unavailable)'
                ) if sdmexp.species_occurrence_dataset else ''
            else:
                # not sdm,... probably msdm?
                species_occ = get_title_from_uuid(
                    sdmexp.species_occurrence_collections,
                    u'(Unavailable)'
                ) if sdmexp.species_occurrence_collections else ''
        else:
            # source experiment no longer available
            toolkits = 'missing experiment'
            species_occ = ''
            envlayers = []
    details.update({
        'type': 'PROJECTION',
        'functions': toolkits,
        'species_occurrence': species_occ,
        'species_absence': '',
        'environmental_layers': envlayers,
        'input_experiments': inputexps,
        'future_env_datasets': futureenvs
    })
    return details
def checkObjectOnDisplay(self):
    """Return the catalog's object_on_display flag for 'Object' content.

    Returns False for any other portal_type or when the lookup fails.
    """
    if self.context.portal_type != "Object":
        return False
    try:
        brain = uuidToCatalogBrain(self.context.UID())
        return brain.object_on_display
    except Exception:
        # narrowed from a bare ``except:`` — still covers a None brain
        # (AttributeError) and catalog lookup errors
        return False
def experiment_inputs(self, context=None):
    # return visualisable input datasets for experiment
    # - used in overlay and compare pages
    if context is None:
        context = self.context
    pc = getToolByName(self.context, 'portal_catalog')
    if ISDMExperiment.providedBy(context):
        # for sdm we return selected occurrence and absence dataset
        # TODO: once available include pesudo absences from result
        for dsuuid in (context.species_occurrence_dataset,
                       context.species_absence_dataset):
            brain = uuidToCatalogBrain(dsuuid)
            if brain:
                yield brain
    elif IMMExperiment.providedBy(context):
        # for mme we return selected occurrence dataset only
        # TODO: once available include pesudo absences from result
        for dsuuid in (context.species_occurrence_dataset,):
            brain = uuidToCatalogBrain(dsuuid)
            if brain:
                yield brain
    elif IMSDMExperiment.providedBy(context):
        # muilt species sdm inputs
        # NOTE(review): no trailing comma, so this iterates the
        # collections value itself, not a 1-tuple — confirm the
        # attribute is a sequence of uuids.
        for dsuuid in (context.species_occurrence_collections):
            brain = uuidToCatalogBrain(dsuuid)
            if brain:
                yield brain
    elif IProjectionExperiment.providedBy(context):
        # one experiment - multiple models
        for sdmuuid, models in context.species_distribution_models.items():
            sdm = uuidToObject(sdmuuid)
            if not sdm:
                continue
            for model in models:
                # yield current projections for each model
                model_brain = uuidToCatalogBrain(model)
                if not model_brain:
                    continue
                # Return only constraint SDM projection
                res_path = model_brain.getPath().rsplit('/', 1)
                for projection in pc.searchResults(
                        path=res_path, BCCDataGenre='DataGenreCP'):
                    yield projection
def items(self):
    # return dict with keys for experiment
    # and subkey 'models' for models within experiment
    """Yield one dict per experiment referenced in ``self.value``.

    ``self.value`` maps experiment uuid -> {dataset uuid -> threshold}
    (see the threshold lookups below).  Each yielded item carries the
    experiment's title/uuid/brain/resolution plus a 'subitems' list with
    one entry per dataset of genre ``self.genre`` found in COMPLETED
    result folders of that experiment, including selection state and
    the available threshold choices.
    """
    if self.value:
        for experiment_uuid, model_uuids in self.value.items():
            item = {}
            expbrain = uuidToCatalogBrain(experiment_uuid)
            # TODO: we have an experiment_uuid, but can't access the
            # experiment (deleted?, access denied?)
            # shall we at least try to get some details?
            if expbrain is None:
                continue
            item['title'] = expbrain.Title
            item['uuid'] = expbrain.UID
            item['brain'] = expbrain
            # TODO: what else would I need from an experiment?
            exp = expbrain.getObject()
            expmd = IBCCVLMetadata(exp)
            item['resolution'] = expmd.get('resolution')
            # now search all datasets within and add infos
            pc = getToolByName(self.context, 'portal_catalog')
            # first find COMPLETED result folders inside the experiment,
            # then search those folders for datasets of the wanted genre
            results = pc.searchResults(path=expbrain.getPath(),
                                       portal_type='Folder',
                                       job_state='COMPLETED')
            brains = pc.searchResults(path=[r.getPath() for r in results],
                                      BCCDataGenre=self.genre)
            # TODO: maybe as generator?
            item['subitems'] = []
            for brain in brains:
                # FIXME: I need a different list of thresholds for display;
                # esp. don't look up threshold, but take values (threshold
                # id and value) from field as is
                thresholds = dataset.getThresholds(brain.UID)[brain.UID]
                threshold = self.value[experiment_uuid].get(brain.UID)
                # is threshold in list?
                if threshold and threshold['label'] not in thresholds:
                    # maybe a custom entered number?
                    # ... I guess we don't really care as long as we
                    # produce the same the user entered. (validate?)
                    thresholds[threshold['label']] = threshold['label']
                dsobj = brain.getObject()
                dsmd = IBCCVLMetadata(dsobj)
                item['subitems'].append({
                    'uuid': brain.UID,
                    'title': brain.Title,
                    'selected': brain.UID in self.value[experiment_uuid],
                    'threshold': threshold,
                    'thresholds': thresholds,
                    'brain': brain,
                    'md': dsmd,
                    'obj': dsobj,
                    # TODO: this correct? only one layer ever?
                    # NOTE(review): ``.values()[0]`` only works on
                    # Python 2 — dict views aren't indexable on Python 3.
                    'layermd': dsmd['layers'].values()[0]
                })
            yield item
def metadata(self):
    """Return dataset metadata for the 'uuid' request parameter.

    Records a 404 error and raises NotFound when the uuid does not
    resolve to a catalog brain, or when metadata extraction fails.
    """
    ds_uuid = self.request.form.get("uuid")
    try:
        # Any failure during lookup or extraction is logged and
        # degraded to a 404 below.
        brain = uuidToCatalogBrain(ds_uuid)
        if brain:
            return dataset.getdsmetadata(brain)
    except Exception as exc:
        LOG.error("Caught exception %s", exc)
    self.record_error("Not Found", "404", "dataset not found",
                      {"parameter": "uuid"})
    raise NotFound(self, "metadata", self.request)
def can_access(self):
    """Report access to the subject object as 'allowed' or 'denied'.

    The subject is the catalog brain for the 'uuid' request parameter
    when one is supplied, otherwise the view's own context; None means
    the current user cannot see it.
    """
    requested_uuid = self.request.form.get('uuid')
    subject = (uuidToCatalogBrain(requested_uuid)
               if requested_uuid else self.context)
    return 'denied' if subject is None else 'allowed'
def renderCell(self, item):
    """Render a table cell as a pretty link to the first referenced
    object, or '-' when there is no value or the brain is unresolvable.
    """
    uuids = self.getValue(item)
    if not uuids:
        return '-'
    brain = uuidToCatalogBrain(uuids[0])
    if not brain:
        return '-'
    markup = (
        u"<a href='%s' target='_blank' class='pretty_link'>"
        u"<span class='pretty_link_icons'>%s</span>"
        u"<span class='pretty_link_content'>%s</span></a>"
    )
    return markup % (brain.getURL(),
                     self._icons(brain),
                     safe_unicode(brain.get_full_title))
def metadata(self):
    """Look up and return metadata for the dataset identified by the
    'uuid' request parameter; record a 404 and raise NotFound if the
    dataset cannot be resolved."""
    try:
        brain = uuidToCatalogBrain(self.request.form.get('uuid'))
        if brain:
            return dataset.getdsmetadata(brain)
    except Exception as exc:
        # lookup/extraction problems degrade to a 404 below
        LOG.error('Caught exception %s', exc)
    self.record_error('Not Found', '404', 'dataset not found', {
        'parameter': 'uuid'})
    raise NotFound(self, 'metadata', self.request)
def getImageObject(self, item, scale="large"):
    """Return the image-scale URL for a catalog brain, or None.

    An Image brain yields its own URL; any other brain is resolved via
    its ``leadMedia`` UID to the associated image brain.

    :param item: catalog brain to take the image from
    :param scale: Plone image scale name (default "large")
    :return: scale URL string, or None when no image can be determined
    """
    if item.portal_type == "Image":
        return "%s/@@images/image/%s" % (item.getURL(), scale)
    # getattr keeps this consistent with get_header_image and avoids
    # an AttributeError on brains without a leadMedia index.
    lead_uuid = getattr(item, "leadMedia", None)
    if lead_uuid is None:
        return None
    media_brain = uuidToCatalogBrain(lead_uuid)
    if media_brain:
        return "%s/@@images/image/%s" % (media_brain.getURL(), scale)
    return None
def get_browse_link(self, uuid):
    """Return a link into the datasets faceted view filtered to the
    given collection.

    Falls back to the plain datasets URL when the collection uuid does
    not resolve to a catalog brain.

    NOTE: an unreachable legacy fallback to datasets_listing_view that
    followed the return (and referenced a misspelled ``datesets_url``)
    has been removed — it could never execute.

    :param uuid: UID of the collection to filter by
    """
    collection = uuidToCatalogBrain(uuid)
    if not collection:
        return self.datasets_url
    params = dict(self.defaults)
    if self.criterion:
        # add the faceted criterion that restricts to this collection
        params[self.criterion.getId()] = collection.UID
    return "{}#{}".format(self.datasets_url, urlencode(params))
def __init__(self, form_, data, errors):
    """Capture form state and resolve reference uuids to catalog brains.

    :param form_: the owning form instance
    :param data: extracted form data; 'references' (if present) is an
        iterable of (key, uuid) pairs
    :param errors: validation errors collected so far
    """
    self.portal_catalog = getToolByName(
        plone_api.portal.get(), 'portal_catalog'
    )
    self.form = form_
    self.data = data
    self.errors = errors
    raw_refs = data.get('references') or []
    self.references = [
        (key, uuidToCatalogBrain(ref_uuid))
        for key, ref_uuid in raw_refs
    ]
def status(self):
    """Return the overall state and per-result states of the experiment
    identified by the 'uuid' request parameter.

    Records a 404 error and raises NotFound when the uuid does not
    resolve to a catalog brain.
    """
    # redundant? ... see metadata
    exp_brain = uuidToCatalogBrain(self.request.form.get('uuid'))
    if not exp_brain:
        self.record_error('Not Found', 404, 'Experiment not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'status', self.request)
    tracker = IExperimentJobTracker(exp_brain.getObject())
    return {
        'status': tracker.state,
        'results': tracker.states,
    }