    def __call__(self, context):
        """List the available reference types for use in a schema field."""
        bib_tool = getToolByName(context, 'portal_bibliography')
        terms = [SimpleTerm(baseNormalize(ref_type),
                            baseNormalize(ref_type),
                            ref_type)
                 for ref_type in bib_tool.getReferenceTypes()]
        return SimpleVocabulary(terms)
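All the vocabulary factories on this page share one pattern: baseNormalize folds an arbitrary unicode value to plain ASCII, which is then used as both the term value and the token (vocabulary tokens must be ASCII-safe), while the original string is kept as the human-readable title. A minimal self-contained sketch, assuming plone.i18n and zope.schema are installed; the reference-type list is a hypothetical stand-in for bib_tool.getReferenceTypes():

from plone.i18n.normalizer.base import baseNormalize
from zope.schema.vocabulary import SimpleTerm, SimpleVocabulary

# Hypothetical data standing in for bib_tool.getReferenceTypes()
ref_types = [u'Article', u'Book', u'Thèse']

# value and token take the normalized ASCII form; title keeps the original
terms = [SimpleTerm(baseNormalize(t), baseNormalize(t), t) for t in ref_types]
vocab = SimpleVocabulary(terms)

print(vocab.getTermByToken('These').title)  # u'Thèse'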
def localRoles(context):
    """Vocabulary of the roles valid in this context."""
    roles = [
        SimpleTerm(
            baseNormalize(role),
            baseNormalize(role),
            _(role),
        ) for role in sorted(context.valid_roles())
    ]
    return SimpleVocabulary(roles)
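The factory only needs a valid_roles() method on the context, so it can be exercised without a Plone site. A sketch with a fake context; it assumes localRoles above is importable and that _ in the original module is a zope.i18nmessageid MessageFactory:

class FakeContext:
    # stands in for a CMF context; valid_roles() is all the factory uses
    def valid_roles(self):
        return ('Manager', 'Editor', 'Reader')

vocab = localRoles(FakeContext())
print([term.token for term in vocab])  # ['Editor', 'Manager', 'Reader']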
Example 4
    def __call__(self, context):
        registry = component.queryUtility(IRegistry)
        if registry is None:
            return []
        categories = registry[self.key]
        terms = [
            SimpleTerm(baseNormalize(category), baseNormalize(category),
                       self.translate(category)) for category in categories
        ]
        return SimpleVocabulary(terms)
Example 5
    def __call__(self, context):
        registry = component.queryUtility(IRegistry)
        if registry is None:
            return []
        categories = registry[self.key]
        terms = [SimpleTerm(baseNormalize(category),
                            baseNormalize(category),
                            category) for category in categories]
        return SimpleVocabulary(terms)
    def __call__(self, context):
        icons = []
        if self.key in ICONS:
            icons = ICONS[self.key]
        registry = component.queryUtility(IRegistry)
        if registry is None:
            return icons
        icons += registry.get('collective.socialicons.%s' % self.key, [])
        terms = [SimpleTerm(baseNormalize(icon),
                            baseNormalize(icon),
                            icon) for icon in icons]
        return SimpleVocabulary(terms)
Example 8
def groupTypes(context):
    """Get content types addable in a specific context"""
    portal_types = getToolByName(context, 'portal_types')
    types = portal_types.listContentTypes()
    types = [t for t in types if context.getTypeInfo().allowType(t)]
    terms = [
        SimpleTerm(
            baseNormalize(t),
            baseNormalize(t),
            _(portal_types[t].title)
        ) for t in sorted(types)
    ]
    return SimpleVocabulary(terms)
    def __call__(self, context, key=None):
        if not key:
            key = self.key
        registry = component.queryUtility(IRegistry)
        if registry is None:
            return []
        categories = registry[key]
        if not categories:
            categories = []
        categories = [utils.magicstring(c.strip()).decode('utf-8')
                      for c in categories]
        terms = [SimpleTerm(baseNormalize(category),
                            baseNormalize(category),
                            category)
                 for category in uniquify(categories)]
        return SimpleVocabulary(terms)
Example 10
def contentVocabulary(context):
    """Vocabulary of folders (by UID) where the current user may add
    content, selected by the readitlater folder_query registry setting.
    """
    sm = getSecurityManager()
    catalog = getToolByName(context, 'portal_catalog')
    registry = getUtility(IRegistry)
    settings = registry.forInterface(IReaditlaterSettings)
    folder_query = json.loads(settings.folder_query)
    brains = catalog.searchResults(**folder_query)
    terms = []
    for brain in brains:
        if sm.checkPermission('Add portal content', brain.getObject()):
            terms.append(
                SimpleTerm(baseNormalize(brain.UID), baseNormalize(brain.UID),
                           brain.Title.decode('utf-8')))
    return SimpleVocabulary(terms)
Example 11
    def __call__(self, context):
        assert self.key is not None
        if isinstance(context, dict):
            context = context.get('context', None)
        values = infolder_keywords(context, self.key)
        values = uniquify(
            [magicstring(a.strip()).decode('utf-8')
             for a in values])
        values.sort()
        values = [
            SimpleTerm(baseNormalize(category).strip(),
                       baseNormalize(category).strip(),
                       category) for category in uniquify(values)]
        return SimpleVocabulary(values)
Example 13
    def get_entries(self):
        """Get glossary entries and keep them in the desired format"""

        catalog = api.portal.get_tool('portal_catalog')
        path = '/'.join(self.context.getPhysicalPath())
        query = dict(portal_type='Term', path={'query': path, 'depth': 1})

        items = {}
        for brain in catalog(**query):
            obj = brain.getObject()
            index = baseNormalize(obj.title)[0].upper()
            if index not in items:
                items[index] = []
            scales = obj.unrestrictedTraverse('@@images')
            image = scales.scale('image', scale='tile')  # 64x64
            item = {
                'title': obj.title,
                'description': obj.description,
                'image': image,
            }
            items[index].append(item)

        language = api.portal.get_current_language()
        collator = zope.ucol.Collator(str(language))

        for k in items:
            items[k] = sorted(items[k], key=lambda term: collator.key(safe_unicode(term['title'])))

        return items
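The grouping key is simply the first character of the normalized title, which files accented initials under their plain ASCII letter. For instance (assuming plone.i18n is available):

from plone.i18n.normalizer.base import baseNormalize

print(baseNormalize(u'Éclair')[0].upper())  # 'E'
print(baseNormalize(u'Über')[0].upper())    # 'U'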
Example 14
    def normalize(self, text, locale=None, max_length=MAX_LENGTH):
        """
        Returns a normalized text. text has to be a unicode string and locale
        should be a normal locale, for example: 'pt_BR', 'sr@Latn' or 'de'
        """
        if locale is not None:
            # Try to get a normalizer for the locale
            util = queryUtility(IIDNormalizer, name=locale)
            parts = locale.split('_')
            if util is None and len(parts) > 1:
                # Try to get a normalizer for the base language if we asked
                # for one for a language/country combination and found none
                util = queryUtility(IIDNormalizer, name=parts[0])
            if util is not None:
                text = util.normalize(text, locale=locale)

        text = baseNormalize(text)

        # lowercase text
        text = text.lower()

        text = IGNORE_REGEX.sub('', text)
        text = NON_WORD_REGEX.sub('-', text)
        text = MULTIPLE_DASHES_REGEX.sub('-', text)
        text = EXTRA_DASHES_REGEX.sub('', text)

        return cropName(text, maxLength=max_length)
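The regex constants are defined elsewhere in the module; in plone.i18n.normalizer they look roughly like the following. These are plausible reconstructions consistent with how normalize() uses them, not verbatim copies:

import re

MAX_LENGTH = 50  # assumed default crop length
IGNORE_REGEX = re.compile(r"['\"]")                # quotes are dropped outright
NON_WORD_REGEX = re.compile(r"[\W\-]+")            # runs of non-word chars become '-'
MULTIPLE_DASHES_REGEX = re.compile(r"\-+")         # squeeze repeated dashes
EXTRA_DASHES_REGEX = re.compile(r"(^\-+)|(\-+$)")  # strip leading/trailing dashes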
def customers_vocab_for(user=None):
    """Customers vocabulary for given or currently authenticated user.
    """
    # XXX: expect context as argument
    context = getSite()
    order_uids = get_vendor_order_uids_for(context, user=user)
    res = set(get_order(context, uid).attrs['creator'] for uid in order_uids)
    vocab = []
    for creator in res:
        if not creator:
            # Development edge case: creator might be None
            continue
        customer = plone.api.user.get(userid=creator)

        email = None
        name = None
        if customer:
            # soft dep on bda.plone.shop
            first = safe_unicode(customer.getProperty('firstname', ''))
            last = safe_unicode(customer.getProperty('lastname', ''))
            email = safe_unicode(customer.getProperty('email', ''))
            # fallback
            full = safe_unicode(customer.getProperty('fullname', ''))
            name = u'{0}, {1}'.format(last, first) if (first or last) else full

        if email and name:
            title = u'{0} ({1}) - {2}'.format(name, creator, email)
        else:
            title = creator
        vocab.append((creator, title))

    # Sort the vocab by title, normalized like sortable_title
    vocab = sorted(vocab, key=lambda x: baseNormalize(x[1]).lower())

    return vocab
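The sort key mirrors Plone's sortable_title: normalize first, then lowercase, so accented names interleave with their ASCII neighbours. A quick check with hypothetical entries:

from plone.i18n.normalizer.base import baseNormalize

vocab = [(u'jdoe', u'Ésposito, José (jdoe) - jose@example.org'),
         (u'asmith', u'Ångström, Anna (asmith) - anna@example.org')]
vocab = sorted(vocab, key=lambda x: baseNormalize(x[1]).lower())
print([creator for creator, title in vocab])  # [u'asmith', u'jdoe']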
Example 17
def mapUnicode(text, mapping=()):
    """
    This method is used for replacement of special characters found in a
    mapping before baseNormalize is applied.
    """
    res = []
    word = u''
    for ch in text:
        ordinal = ord(ch)
        # split english word
        if ordinal < 128:
            word += ch
            continue
        elif word and not word.isspace():
            res.append(word.strip())
            word = u''

        if ordinal in mapping:
            # try to apply custom mappings
            res.append(mapping.get(ordinal).strip())
        else:
            # else leave untouched
            res.append(ch)
    else:
        # the loop has no break, so this else always runs: flush the last word
        if word and not word.isspace():
            res.append(word.strip())
    # always apply base normalization
    return baseNormalize(u'-'.join(res))
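A quick check of the behaviour, assuming mapUnicode as defined above is in scope: custom mapping entries win over base normalization, and runs of ASCII characters stay together as words joined by dashes. The mapping is hypothetical:

# 0x4e2d is the code point of the CJK character '中' (hypothetical mapping)
mapping = {0x4e2d: u'zhong'}
print(mapUnicode(u'foo中bar', mapping))  # u'foo-zhong-bar'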
Example 19
    def normalize(self, text, locale=None, max_length=None):
        """
        Returns a normalized text. text has to be a unicode string.
        """
        slug = slugify(text)
        # always apply base normalization
        return baseNormalize(slug)
Example 20
    def normalize(self, text, locale=None, max_length=None):
        """
        Returns a normalized text. text has to be a unicode string.
        """
        mapped = zh_normalizer(text, mapping=mapping)

        return baseNormalize(mapped)
Example 21
    def normalize(self, text, locale=None, max_length=MAX_LENGTH):
        """
        Returns a normalized text. text has to be a unicode string and locale
        should be a normal locale, for example: 'pt-BR', 'sr@Latn' or 'de'
        """
        if locale is not None:
            # Try to get a normalizer for the locale
            util = queryUtility(IIDNormalizer, name=locale)
            parts = LOCALE_SPLIT_REGEX.split(locale)
            if util is None and len(parts) > 1:
                # Try to get a normalizer for the base language if we asked
                # for one for a language/country combination and found none
                util = queryUtility(IIDNormalizer, name=parts[0])
            # be defensive: if queryUtility() returns an instance of the same
            # normalizer class as this one, we'll loop forever until
            # "RuntimeError: maximum recursion depth exceeded" (ticket #11630)
            if util is not None and util.__class__ is not self.__class__:
                text = util.normalize(text, locale=locale)

        text = baseNormalize(text)

        # lowercase text
        text = text.lower()

        text = IGNORE_REGEX.sub('', text)
        text = NON_WORD_REGEX.sub('-', text)
        text = MULTIPLE_DASHES_REGEX.sub('-', text)
        text = EXTRA_DASHES_REGEX.sub('', text)

        return cropName(text, maxLength=max_length)
Example 24
def letterIndexer(context):
    """Index the normalized first letter of the title and of each variant."""
    terms = []
    if context.title:
        terms.append(context.title)
    if context.variants:
        terms.extend(list(context.variants))
    normalized = [baseNormalize(term)[0].upper() for term in terms if term]
    normalized = list(set(normalized))
    return normalized
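In context this is presumably wired up as a catalog indexer via plone.indexer; a sketch of the usual registration, where IGlossaryTerm is a hypothetical marker interface (the real interface, and the ZCML adapter registration, live in the original package):

from plone.indexer import indexer
from zope.interface import Interface

class IGlossaryTerm(Interface):
    """Hypothetical marker interface for the indexed content type."""

@indexer(IGlossaryTerm)
def letter(obj):
    # delegates to letterIndexer above; registered as a named adapter in ZCML
    return letterIndexer(obj)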
Example 25
    def _list_results(self):
        """Terms list (brains) depending on the request"""

        search_letter = self.search_letter
        if search_letter:
            search_letter = search_letter.upper()

        if search_letter == "[0-9]":
            search_letter = tuple(string.digits)

        common = {
            "context": self.context,
            "depth": 1,
            "portal_type": "GlossaryTerm",
        }

        if search_letter:
            results = api.content.find(letter=search_letter, **common)
        elif self.search_text:
            results = api.content.find(SearchableText=self.search_text,
                                       **common)
            # We redirect to the result if unique
            if len(results) == 1:
                target = results[0].getURL()
                raise Redirect(target)
        else:
            # Viewing all terms
            results = api.content.find(**common)
        results = list(results)
        variant_results = []
        # create a list of tuples with the sort key as the first item
        for brain in results:
            for variant in brain["variants"]:
                sortable_variant = baseNormalize(variant.upper())
                if (search_letter and isinstance(search_letter, str)
                        and sortable_variant[0] != search_letter):
                    continue
                if (search_letter and isinstance(search_letter, tuple)
                        and sortable_variant[0] not in search_letter):
                    continue
                variant_results.append((
                    sortable_variant,
                    {
                        "title": variant,
                        "brain": brain,
                        "letter": sortable_variant[0],
                    },
                ))
        variant_results = sorted(variant_results, key=lambda r: r[0])
        return tuple([r[1] for r in variant_results])
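The "[0-9]" bucket is the reason for the str/tuple split above: a plain letter is compared with equality, while the digits bucket is expanded to a tuple so membership can be tested instead. A small illustration with a hypothetical variant:

import string

sortable_variant = u'2ND-LEVEL CACHE'        # hypothetical normalized variant
digits_bucket = tuple(string.digits)         # what "[0-9]" expands to
print(sortable_variant[0] in digits_bucket)  # True  -- kept in the digits bucket
print(sortable_variant[0] == u'C')           # False -- skipped for letter 'C'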
Example 26
    def normalize(self, text, locale=None, max_length=MAX_URL_LENGTH):
        """
        Returns a normalized text. text has to be a unicode string and locale
        should be a normal locale, for example: 'pt-BR', 'sr@Latn' or 'de'
        """
        if locale is not None:
            # Try to get a normalizer for the locale
            util = queryUtility(IURLNormalizer, name=locale)
            parts = LOCALE_SPLIT_REGEX.split(locale)
            if util is None and len(parts) > 1:
                # Try to get a normalizer for the base language if we asked
                # for one for a language/country combination and found none
                util = queryUtility(IURLNormalizer, name=parts[0])
            # be defensive: if queryUtility() returns an instance of the same
            # normalizer class as this one, we'll loop forever until
            # "RuntimeError: maximum recursion depth exceeded" (ticket #11630)
            if util is not None and util.__class__ is not self.__class__:
                text = util.normalize(text, locale=locale)

        text = baseNormalize(text)

        # Remove any leading underscores
        m = UNDERSCORE_START_REGEX.match(text)
        if m is not None:
            text = m.groups()[1]

        # lowercase text
        base = text.lower()
        ext = ''

        m = FILENAME_REGEX.match(base)
        if m is not None:
            base = m.groups()[0]
            ext = m.groups()[1]

        base = IGNORE_REGEX.sub('', base)
        base = NON_WORD_REGEX.sub('-', base)
        base = URL_DANGEROUS_CHARS_REGEX.sub('-', base)
        base = EXTRA_DASHES_REGEX.sub('', base)
        base = MULTIPLE_DASHES_REGEX.sub('-', base)

        base = cropName(base, maxLength=max_length)

        if ext != '':
            base = base + '.' + ext

        return base
Example 28
    def normalize(self, text, locale=None, max_length=MAX_FILENAME_LENGTH):
        """
        Returns a normalized text. text has to be a unicode string and locale
        should be a normal locale, for example: 'pt_BR', 'sr@Latn' or 'de'
        """
        if locale is not None:
            # Try to get a normalizer for the locale
            util = queryUtility(IFileNameNormalizer, name=locale)
            parts = locale.split('_')
            if util is None and len(parts) > 1:
                # Try to get a normalizer for the base language if we asked
                # for one for a language/country combination and found none
                util = queryUtility(IFileNameNormalizer, name=parts[0])
            if util is not None:
                text = util.normalize(text, locale=locale)

        # Preserve filename extensions
        text = baseNormalize(text)

        # Remove any leading underscores
        m = UNDERSCORE_START_REGEX.match(text)
        if m is not None:
            text = m.groups()[1]

        base = text
        ext = ''

        m = FILENAME_REGEX.match(text)
        if m is not None:
            base = m.groups()[0]
            ext = m.groups()[1]

        base = IGNORE_REGEX.sub('', base)
        base = DANGEROUS_CHARS_REGEX.sub('-', base)
        base = EXTRA_DASHES_REGEX.sub('', base)
        base = MULTIPLE_DASHES_REGEX.sub('-', base)

        base = cropName(base, maxLength=max_length)

        if ext != '':
            base = base + '.' + ext

        return base
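Unlike the URL normalizer above, this variant never lowercases, so the extension keeps its case. The split relies on FILENAME_REGEX, which is not shown; a plausible definition and a quick check (the real pattern lives in plone.i18n.normalizer and may differ):

import re

# Plausible reconstruction: greedy base, then a final dot and a short extension
FILENAME_REGEX = re.compile(r"^(.+)\.(\w{,4})$")

m = FILENAME_REGEX.match(u'Odes Dokument.PDF')
print(m.groups())  # (u'Odes Dokument', u'PDF') -- extension preserved verbatim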
Example 29
    def process(self, lst):
        enc = 'utf-8'
        result = []
        for s in lst:
            try:
                if not isinstance(s, str):
                    s = s.decode(enc)
            except (UnicodeDecodeError, TypeError):
                pass

            if 0x41 <= ord(s[0]) <= 0x24F:
                # normalize latin words
                # words beginning with a latin character
                # are commonly latin words
                s = baseNormalize(s).lower()

            result.append(s.lower())

        return result
Example 30
    def process(self, lst):
        # Python 2 variant of the previous example (decodes via unicode())
        enc = 'utf-8'
        result = []
        for s in lst:
            try:
                if not isinstance(s, unicode):
                    s = unicode(s, enc)
            except (UnicodeDecodeError, TypeError):
                pass

            if 0x41 <= ord(s[0]) <= 0x24F:
                # normalize latin words
                # words beginning with a latin character
                # are commonly latin words
                s = baseNormalize(s).lower()

            result.append(s.lower())

        return result
Example 31
    def normalize(self, text, locale=None, max_length=MAX_URL_LENGTH):
        """
        Returns a normalized text. text has to be a unicode string and locale
        should be a normal locale, for example: 'pt_BR', 'sr@Latn' or 'de'
        """
        if locale is not None:
            # Try to get a normalizer for the locale
            util = queryUtility(IURLNormalizer, name=locale)
            parts = locale.split('_')
            if util is None and len(parts) > 1:
                # Try to get a normalizer for the base language if we asked
                # for one for a language/country combination and found none
                util = queryUtility(IURLNormalizer, name=parts[0])
            if util is not None:
                text = util.normalize(text, locale=locale)

        text = baseNormalize(text)

        # lowercase text
        base = text.lower()
        ext = ''

        m = FILENAME_REGEX.match(base)
        if m is not None:
            base = m.groups()[0]
            ext = m.groups()[1]

        base = NON_WORD_REGEX.sub('-', base)
        base = IGNORE_REGEX.sub('', base)
        base = URL_DANGEROUS_CHARS_REGEX.sub('-', base)
        base = EXTRA_DASHES_REGEX.sub('', base)
        base = MULTIPLE_DASHES_REGEX.sub('-', base)

        base = cropName(base, maxLength=max_length)

        if ext != '':
            base = base + '.' + ext

        return base
Example 32
def mapUnicode(text, mapping=()):
    """
    NOTE: rewritten by andelf to insert '-' between characters or English words.
    This method is used for replacement of special characters found in a
    mapping before baseNormalize is applied.
    """
    res = []
    word = u''                          # handle english words
    for ch in text:
        ordinal = ord(ch)
        if ordinal < 127 and ch.isalnum():
            word += ch
            continue
        elif word:
            res.append(word)
            word = u''
        if ordinal in mapping:
            res.append(mapping.get(ordinal))
        else:
            res.append(ch)
    res.append(word)
    res = filter(lambda u: u and not u.isspace(), res)
    # always apply base normalization
    return baseNormalize(u'-'.join(res))
    def getFirstLetter(self, term):
        """Return the first letter of term, normalized to ASCII."""
        uterm = safe_unicode(term)
        return baseNormalize(uterm[0:1]).encode('utf-8')
Example 35
    def get_mode(self):
        return baseNormalize(self.config.belowmode).lower().replace(' ', '-')
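A quick check of the chain, with a hypothetical config value standing in for self.config.belowmode:

from plone.i18n.normalizer.base import baseNormalize

belowmode = u'Läs mer'  # hypothetical config value
print(baseNormalize(belowmode).lower().replace(' ', '-'))  # u'las-mer'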