def storage(self):
    """Return the scale storage stored under the ``plone.scale``
    annotation key, creating it on first access.

    Legacy storages (pre-``ScalesDict``) found under the key are
    migrated in place to a ``ScalesDict`` before being returned.
    """
    annotations = IAnnotations(self.context)
    current = annotations.setdefault('plone.scale', ScalesDict())
    if isinstance(current, ScalesDict):
        return current
    # One-time migration: wrap the legacy mapping in a ScalesDict
    # and persist the replacement.
    migrated = ScalesDict(current)
    annotations['plone.scale'] = migrated
    return migrated
def storage(self):
    """Fetch (or lazily create) the ``plone.scale`` annotation storage.

    If a legacy mapping (e.g. a PersistentDict) is found it is replaced
    by an equivalent ``ScalesDict`` and the new object is returned.
    """
    ann = IAnnotations(self.context)
    scales = ann.setdefault('plone.scale', ScalesDict())
    if not isinstance(scales, ScalesDict):
        # migrate legacy storage to ScalesDict, keeping its contents
        scales = ScalesDict(scales)
        ann['plone.scale'] = scales
    return scales
def __call__(self, from_script=False):
    """Render the sitemap if the feature is enabled.

    Raises NotFound when the sitemap feature is disabled (checked via
    the registry on Plone 5, via portal_properties otherwise).

    :param from_script: when True (and async generation is enabled),
        generate the sitemap now and persist it in an annotation
        instead of returning it to the browser.
    :return: the sitemap payload, or None in the from_script branch.
    """
    registry = getUtility(IRegistry)
    if HAVE_P5:
        # Plone 5+: sitemap toggle lives in the registry.
        settings = registry.forInterface(ISiteSchema, prefix="plone")
        if not settings.enable_sitemap:
            raise NotFound(self.context, self.filename, self.request)
    else:
        # Older Plone: toggle lives in portal_properties.
        sp = getToolByName(self.context, "portal_properties").site_properties
        if not sp.enable_sitemap:
            raise NotFound(self.context, self.filename, self.request)
    results = ""
    items = None
    if self.settings.enable_split_sitemaps:
        # Cap on items per sitemap file when splitting is enabled.
        items = self.settings.items_per_sitemap
    if self.settings.async_sitemap_generation:
        if from_script:
            logger.info(
                "View called from script. Generating sitemap and saving "
                "results")
            # Generate now and cache the payload plus a timestamp in a
            # persistent annotation; nothing is returned to a browser
            # (note: the Content-Type header below is skipped here).
            data = self._generate(items)
            ann = IAnnotations(self.context)
            sitemap_data = ann.setdefault(ANN_KEY, PersistentMapping())
            sitemap_data["data"] = data
            sitemap_data["generated"] = DateTime().ISO8601()
            return
        else:
            # Serve whatever a previous script run cached; empty string
            # if the sitemap was never generated.
            ann = IAnnotations(self.context)
            sitemap_data = ann.get(ANN_KEY, {})
            results = sitemap_data.get("data", "")
    else:
        # Synchronous path.
        # NOTE(review): this calls self.generate(...) while the async
        # branch calls self._generate(...) — presumably two related
        # helpers; confirm both exist on this view.
        results = self.generate(items)
    self.request.response.setHeader("Content-Type", "application/octet-stream")
    return results
class EEAReadabilityPlugin(BrowserView):
    """Browser view that stores per-field readability scores in an
    annotation on the context and aggregates them on demand.
    """

    def __init__(self, context, request):
        """Cache the annotation storage and the annotation key."""
        self.anno = IAnnotations(context)
        self.context = context
        self.request = request
        self.key = 'readability_scores'

    def __call__(self):
        """Persist the posted scores.

        The request body arrives as a single JSON-encoded form key
        mapping field names to score dicts; each entry is stored under
        ``self.key`` in the context's annotations.
        """
        # BUGFIX: dict.keys()[0] fails on Python 3 (keys() is a view,
        # not subscriptable); next(iter(...)) works on both 2 and 3.
        form_values = json.loads(next(iter(self.request.form)))
        scores = self.anno.setdefault(self.key, {})
        for name in form_values:
            scores[name] = form_values[name]
        return ""

    def get_scores(self):
        """Aggregate the stored per-field scores.

        :return: dict with ``word_count``, ``sentence_count`` and
            ``readability_value`` totals (value averaged when more than
            one field has readability enabled), or ``{}`` when no field
            has a readability value.
        """
        scores = self.anno.get(self.key, {})
        key_metrics = {'word_count': 0,
                       'sentence_count': 0,
                       'readability_value': 0}
        enabled_for = 0
        for value in scores.values():
            # Skip fields without a readability value (disabled fields).
            if not value.get('readability_value'):
                continue
            enabled_for += 1
            key_metrics['word_count'] += int(value.get('word_count') or 0)
            key_metrics['sentence_count'] += value.get('sentence_count') or 0
            key_metrics['readability_value'] += int(round(float(
                value.get('readability_value') or 0)))
        # Average the score when more than one text field has
        # readability enabled.
        if enabled_for > 1:
            key_metrics['readability_value'] = "{0:.0f}".format(
                key_metrics['readability_value'] / enabled_for)
        if key_metrics['readability_value'] == 0:
            return {}
        return key_metrics
def get_futures(request):
    """Return the futures mapping stored on *request*, creating an
    empty one on first access.
    """
    annotations = IAnnotations(request)
    # setdefault already returns the stored (or freshly inserted)
    # mapping — the original's follow-up .get() lookup was redundant.
    return annotations.setdefault(FUTURES_KEY, {})
def get_promises(request):
    """Return the promises mapping stored on *request*, creating an
    empty one on first access, and mark the request as containing
    promises.
    """
    alsoProvides(request, IContainsPromises)
    annotations = IAnnotations(request)
    # setdefault already returns the stored (or freshly inserted)
    # mapping — the original's follow-up .get() lookup was redundant.
    return annotations.setdefault(PROMISES_KEY, {})
class VirtualTreeCategoryConfiguration(object):
    """Adapter on the Plone site root that manages a tree of categories
    (with attached keywords) persisted in the site's annotations.
    """

    implements(interfaces.IVirtualTreeCategoryConfiguration)
    adapts(IPloneSiteRoot)

    def __init__(self, context):
        self.context = context
        self.ann = IAnnotations(context)
        # category set here as root is not exposed to the public
        self.storage = self.ann.setdefault(VTC_ANNOTATIONS_KEY,
                                           Category("root-node", "Root"))

    def get_enabled(self):
        # Feature flag; absent annotation means disabled.
        return self.ann.get(VTC_ENABLED_ANNOTATIONS_KEY, False)

    def set_enabled(self, value):
        self.ann[VTC_ENABLED_ANNOTATIONS_KEY] = value

    enabled = property(get_enabled, set_enabled)

    def _find_node(self, category_path):
        """ Returns node in path or root node """
        if category_path == "/":
            # normalize root category
            category_path = ""
        # Accept either a splitter-joined string or an already-split
        # list of path segments.
        if not isinstance(category_path, ListTypes):
            path = category_path.split(CATEGORY_SPLITTER)
        else:
            path = category_path
        dpath = self.storage
        if category_path and path:
            # category_path may be empty string (root category)
            for item_id in path:
                if item_id:
                    # Descend one level; None means the path is invalid.
                    dpath = dpath.get(item_id, None)
                    if dpath is None:
                        return None
        return dpath

    def list_categories(self, path):
        """ List categories on the specified path only. """
        node = self._find_node(path)
        if node is not None:
            return node.values()
        else:
            return []

    def list_keywords(self, path, recursive=False):
        """ List keywords assigned to specified category """
        result = set()
        node = self._find_node(path)
        if node is not None:
            result.update(node.keywords)
            if recursive:
                # Collect keywords from the whole subtree.
                for category in node.values():
                    result.update(self.list_keywords(category.path,
                                                     recursive=True))
        # do not return set, it is not json serializable
        return list(result)

    def add_category(self, category_path, category_name):
        """Add a child category under *category_path*.

        The id is the normalized form of *category_name*; raises
        VirtualTreeCategoriesError when the id already exists.
        """
        node = self._find_node(category_path)
        norm = getUtility(IIDNormalizer)
        category_id = norm.normalize(safe_unicode(category_name))
        if node.get(category_id, None) is not None:
            Error = interfaces.VirtualTreeCategoriesError
            raise Error("Category already exists")
        else:
            node[category_id] = Category(category_id, category_name)
            logger.info("Category %s (%s) added" % (category_name,
                                                    category_id))
            return category_id

    def category_tree(self):
        """Return the whole tree as nested dicts (jsTree-style nodes)."""
        def add_subkeys(node):
            res = []
            for k, category in node.items():
                item = dict(
                    attributes=dict(id=category.id, rel="folder"),
                    state="closed",
                    data=category.title,
                    children=add_subkeys(category),
                )
                res.append(item)
            return res
        return add_subkeys(self.storage)

    def remove_category(self, category_path):
        """Delete the category at *category_path*; True on success."""
        node = self._find_node(category_path)
        if node is not None:
            parent = node.__parent__
            del parent[node.id]
            logger.info("Category %s (%s) removed" % (node.title, node.id))
            del node
            return True
        else:
            return False

    def rename_category(self, category_path, old_category_id, new_name):
        """Rename the category at *category_path*, keeping its position
        among its siblings; returns the new id, or False when the path
        does not resolve.
        """
        node = self._find_node(category_path)
        if node is not None:
            parent = node.__parent__
            norm = getUtility(IIDNormalizer)
            new_id = norm.normalize(safe_unicode(new_name))
            node.id = new_id
            node.title = new_name
            # Fix order of items
            # (be sure item is on the same position as before)
            # Copy previous order but replace old_category_id with new id
            # Finally updateOrder of the parent's items
            new_order = []
            for item in list(parent.keys()):
                if item == old_category_id:
                    new_order.append(new_id)
                else:
                    new_order.append(item)
            del parent[old_category_id]
            parent[new_id] = node
            parent.updateOrder(new_order)
            logger.info("Category %s renamed to %s (%s)" %
                        (old_category_id, new_name, new_id))
            return new_id
        else:
            return False

    def set(self, category_path, keywords):
        """Replace the keyword list of the category at *category_path*."""
        node = self._find_node(category_path)
        if node is not None:
            node.keywords = keywords
            logger.info("Keywords for category %s set to %r" %
                        (node.title, keywords))
            return True
        else:
            return False

    def get(self, category_path):
        # Non-recursive keyword listing for the given category.
        return self.list_keywords(category_path, recursive=False)

    def by_keyword(self, keyword=None):
        """Map keywords to the category paths they are assigned to.

        With *keyword* given, returns {keyword: [paths]}; otherwise a
        mapping for every keyword found in the tree.
        """
        def process_subkeys(node):
            res = {}
            for k, category in node.items():
                if keyword is None:
                    for kw in category.keywords:
                        if kw not in res:
                            res[kw] = []
                        res[kw].append(category.path)
                elif keyword in category.keywords:
                    if keyword not in res:
                        res[keyword] = []
                    res[keyword].append(category.path)
                # NOTE(review): dict.update replaces whole lists, so a
                # keyword appearing both here and in a subtree keeps
                # only the subtree's paths — verify this is intended.
                res.update(process_subkeys(category))
            return res
        result = process_subkeys(self.storage)
        if keyword is not None:
            cats = result.get(keyword, [])
            return {keyword: cats}
        else:
            return result
def handleSaveImport(self, action):
    """Create and handle form button "Save and Import".

    Validates the uploaded CSV file and the header/field mapping, runs
    the import, reports the counts to the user, and remembers the
    settings in annotations to pre-fill the next import.

    Returns False on form extraction errors, True after a completed
    import; raises WidgetActionExecutionError for per-widget
    validation failures.
    """
    # Extract form field values and errors from HTTP request
    data, errors = self.extractData()
    if errors:
        self.status = self.formErrorsMessage
        return False
    import_file = data["import_file"]
    if not import_file:
        # (Unreachable `return` statements that followed each raise in
        # the original have been removed.)
        raise WidgetActionExecutionError(
            'import_file',
            Invalid(_(u"Please provide a csv file to import")))
    # File upload is not saved in settings
    file_resource = import_file.data
    file_name = import_file.filename
    # Check the Excel type first: it fails the generic CSV check too,
    # so testing it second (as the original did) made its more helpful
    # message unreachable.
    if import_file.contentType.startswith("application/vnd.ms-excel"):
        raise WidgetActionExecutionError(
            'import_file',
            Invalid(_(u"Please convert your Excel file to CSV first")))
    if not (import_file.contentType.startswith("text/") or
            import_file.contentType.startswith("application/csv")):
        raise WidgetActionExecutionError(
            'import_file',
            Invalid(_(u"Please provide a file of type CSV")))
    if data["object_type"] in ['__ignore__', '__stop__']:
        create_new = False
        object_type = None
    else:
        create_new = True
        object_type = data["object_type"]
    # blank header or field means we don't want it
    header_mapping = [d for d in data['header_mapping']
                      if d['field'] and d['header']]
    matching_headers = {d['field']: d['header'] for d in header_mapping}
    if create_new and not (matching_headers.get('id') or
                           matching_headers.get('title')):
        raise WidgetActionExecutionError(
            'header_mapping',
            Invalid(_(u"If creating new content you need either 'Short Name"
                      u" or 'Title' in your data.")))
    if not matching_headers:
        raise WidgetActionExecutionError(
            'header_mapping',
            Invalid(_(u"You must pick which fields should contain your data")))
    primary_key = data["primary_key"]
    if primary_key and not matching_headers.get(primary_key):
        raise WidgetActionExecutionError(
            'primary_key',
            Invalid(_(u"Must be a field selected in Header Mapping")))
    # based from the matching fields, get all the values.
    matching_fields = {d['header']: d['field'] for d in header_mapping}
    import_metadata = dexterity_import(
        self.context,
        file_resource,
        matching_fields,
        object_type,
        create_new,
        primary_key
    )
    existing_count = import_metadata["existing_count"]
    new_count = import_metadata["new_count"]
    ignore_count = import_metadata["ignore_count"]
    api.portal.show_message(
        message=_("import_message_csv_info",  # nopep8
                  # BUGFIX: the template used "$(unknown)", so the
                  # "filename" mapping entry was never interpolated;
                  # use ${filename} instead.
                  default=u"${new_num} items added, ${existing_num} items "
                          u"updated and ${ignore_num} items skipped from "
                          u"${filename}",
                  mapping={"new_num": new_count,
                           "existing_num": existing_count,
                           "ignore_num": ignore_count,
                           "filename": file_name}),
        request=self.request,
        type="info")
    self.import_metadata = import_metadata
    # Save our successful settings to save time next import
    annotations = IAnnotations(self.context)
    settings = annotations.setdefault(KEY, {})
    settings['header_list'] = [d['header'] for d in header_mapping]
    # we will keep making this bigger in case they switch between several CSVs
    settings.setdefault("matching_fields", {}).update(matching_fields)
    settings['primary_key'] = primary_key
    settings['object_type'] = object_type
    return True
def __init__(self, context):
    """Bind the adapter to *context* and ensure its metadata store
    (a PersistentDict under KEY) exists in the annotations.
    """
    self.context = context
    self._md = IAnnotations(context).setdefault(KEY, PersistentDict())
def __init__(self, context):
    """Initialize the adapter: fetch (or lazily create) the
    PersistentDict metadata store stored under KEY.
    """
    self.context = context
    storage = IAnnotations(context)
    self._md = storage.setdefault(KEY, PersistentDict())
def handleSaveImport(self, action):
    """Create and handle form button "Save and Import".

    Validates the uploaded CSV file and the header/field mapping, runs
    the import, reports the counts to the user, and remembers the
    settings in annotations to pre-fill the next import.

    Returns False on form extraction errors, True after a completed
    import; raises WidgetActionExecutionError for per-widget
    validation failures.
    """
    # Extract form field values and errors from HTTP request
    data, errors = self.extractData()
    if errors:
        self.status = self.formErrorsMessage
        return False
    import_file = data["import_file"]
    if not import_file:
        # (Unreachable `return` statements that followed each raise in
        # the original have been removed.)
        raise WidgetActionExecutionError(
            'import_file',
            Invalid(_(u"Please provide a csv file to import")))
    # File upload is not saved in settings
    file_resource = import_file.data
    file_name = import_file.filename
    # Check the Excel type first: it fails the generic CSV check too,
    # so testing it second (as the original did) made its more helpful
    # message unreachable.
    if import_file.contentType.startswith("application/vnd.ms-excel"):
        raise WidgetActionExecutionError(
            'import_file',
            Invalid(_(u"Please convert your Excel file to CSV first")))
    if not (import_file.contentType.startswith("text/") or
            import_file.contentType.startswith("application/csv")):
        raise WidgetActionExecutionError(
            'import_file',
            Invalid(_(u"Please provide a file of type CSV")))
    if data["object_type"] in ['__ignore__', '__stop__']:
        create_new = False
        object_type = None
    else:
        create_new = True
        object_type = data["object_type"]
    # blank header or field means we don't want it
    header_mapping = [
        d for d in data['header_mapping'] if d['field'] and d['header']
    ]
    matching_headers = {d['field']: d['header'] for d in header_mapping}
    if create_new and not (matching_headers.get('id')
                           or matching_headers.get('title')):
        raise WidgetActionExecutionError(
            'header_mapping',
            Invalid(
                _(u"If creating new content you need either 'Short Name"
                  u" or 'Title' in your data.")))
    if not matching_headers:
        raise WidgetActionExecutionError(
            'header_mapping',
            Invalid(
                _(u"You must pick which fields should contain your data")))
    primary_key = data["primary_key"]
    if primary_key and not matching_headers.get(primary_key):
        raise WidgetActionExecutionError(
            'primary_key',
            Invalid(_(u"Must be a field selected in Header Mapping")))
    # based from the matching fields, get all the values.
    matching_fields = {d['header']: d['field'] for d in header_mapping}
    import_metadata = dexterity_import(self.context, file_resource,
                                       matching_fields, object_type,
                                       create_new, primary_key)
    existing_count = import_metadata["existing_count"]
    new_count = import_metadata["new_count"]
    ignore_count = import_metadata["ignore_count"]
    api.portal.show_message(
        message=_(
            "import_message_csv_info",  # nopep8
            # BUGFIX: the template used "$(unknown)", so the "filename"
            # mapping entry was never interpolated; use ${filename}.
            default=u"${new_num} items added, ${existing_num} items "
                    u"updated and ${ignore_num} items skipped from "
                    u"${filename}",
            mapping={
                "new_num": new_count,
                "existing_num": existing_count,
                "ignore_num": ignore_count,
                "filename": file_name
            }),
        request=self.request,
        type="info")
    self.import_metadata = import_metadata
    # Save our successful settings to save time next import
    annotations = IAnnotations(self.context)
    settings = annotations.setdefault(KEY, {})
    settings['header_list'] = [d['header'] for d in header_mapping]
    # we will keep making this bigger in case they switch between several CSVs
    settings.setdefault("matching_fields", {}).update(matching_fields)
    settings['primary_key'] = primary_key
    settings['object_type'] = object_type
    return True
def _storage(self):
    """Return the sync-metadata mapping for this context, creating an
    OOBTree under SYNC_METADATA_KEY on first access.

    The fallback is passed positionally: ``dict.setdefault`` and the
    BTree implementations reject a ``default=`` keyword argument with a
    TypeError, so the keyword form only worked for annotation objects
    implementing ``collections.abc.MutableMapping``.
    """
    annotations = IAnnotations(self.context)
    return annotations.setdefault(SYNC_METADATA_KEY, OOBTree())
def load(self):
    """Return the block annotation mapping, creating an empty
    PersistentMapping under BLOCK_ANNOTATION_KEY when missing.
    """
    storage = IAnnotations(self.context)
    return storage.setdefault(BLOCK_ANNOTATION_KEY, PersistentMapping())
def load(self):
    """Return a deep copy of the stored page configuration.

    On first access the annotation is seeded with a persistent version
    of the default page config; the deep copy keeps callers from
    mutating the persisted state in place.
    """
    annotations = IAnnotations(self.context)
    default_config = make_resursive_persistent(self._default_page_config())
    stored = annotations.setdefault(SL_ANNOTATION_KEY, default_config)
    return deepcopy(stored)
def _storage(self):
    """Return the sync-metadata mapping for this context, creating an
    OOBTree under SYNC_METADATA_KEY on first access.

    The fallback is passed positionally: ``dict.setdefault`` and the
    BTree implementations reject a ``default=`` keyword argument with a
    TypeError, so the keyword form only worked for annotation objects
    implementing ``collections.abc.MutableMapping``.
    """
    annotations = IAnnotations(self.context)
    return annotations.setdefault(SYNC_METADATA_KEY, OOBTree())