Example no. 1
 def css_class(self):
     """ Widget specific css class
     """
     css_type = self.widget_type
     css_title = normalizer.normalize(self.data.title)
     return ('faceted-minaraad-types-widget '
             'faceted-{0}-widget section-{1}').format(css_type, css_title)
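For orientation, a minimal sketch of the string this method produces, assuming a hypothetical widget whose widget_type is 'checkbox' and whose title normalizes to 'news-types':

# Illustrative values only; the real ones come from the widget instance.
css_type = 'checkbox'
css_title = 'news-types'
print(('faceted-minaraad-types-widget '
       'faceted-{0}-widget section-{1}').format(css_type, css_title))
# -> faceted-minaraad-types-widget faceted-checkbox-widget section-news-types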
Example no. 2
 def css_class(self):
     """ Widget specific css class
     """
     css_type = self.widget_type
     css_title = normalizer.normalize(self.data.title)
     return ('faceted-multiselect-widget '
             'faceted-{0}-widget section-{1}').format(css_type, css_title)
Example no. 3
    def normalize(self, text, locale=None, max_length=MAX_URL_LENGTH,
                  max_words=MAX_URL_WORDS, orphans=URL_ORPHANS):
        """
        Override plone.i18n URLNormalizer to accept cutting by words.
        """
        if not isinstance(text, unicode):
            try:
                text = text.decode('utf-8')
            except Exception:
                logger.info("Can't decode URL to be normalized")

        text = urlnormalizer.normalize(text, locale, max_length)
        if not max_words:
            return text

        m = FILENAME_REGEX.match(text)
        if m is not None:
            text = m.groups()[0]
            ext = m.groups()[1]
        else:
            ext = ''

        new_text = text.split('-')
        if len(new_text) <= max_words + orphans:
            new_text = text
        else:
            new_text = '-'.join(new_text[:max_words])

        if ext:
            new_text = '.'.join((new_text, ext))
        return new_text
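A rough walk-through of the word-based cut above, with assumed values max_words=3 and orphans=1, and assuming FILENAME_REGEX splits a trailing extension off the name:

stem, ext = 'a-very-long-page-title', 'html'  # what FILENAME_REGEX would extract
words = stem.split('-')                       # 5 words
max_words, orphans = 3, 1                     # assumed values for illustration
if len(words) <= max_words + orphans:
    result = stem                             # short enough: keep the full text
else:
    result = '-'.join(words[:max_words])      # keep only the first max_words words
print('.'.join((result, ext)))                # -> a-very-long.html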
Example no. 4
    def normalize(self,
                  text,
                  locale=None,
                  max_length=MAX_URL_LENGTH,
                  max_words=MAX_URL_WORDS,
                  orphans=URL_ORPHANS):
        """
        Override plone.i18n URLNormalizer to accept cutting by words.
        """
        if not isinstance(text, unicode):
            try:
                text = text.decode('utf-8')
            except Exception:
                logger.info("Can't decode URL to be normalized")

        text = urlnormalizer.normalize(text, locale, max_length)
        if not max_words:
            return text

        m = FILENAME_REGEX.match(text)
        if m is not None:
            text = m.groups()[0]
            ext = m.groups()[1]
        else:
            ext = ''

        new_text = text.split('-')
        if len(new_text) <= max_words + orphans:
            new_text = text
        else:
            new_text = '-'.join(new_text[:max_words])

        if ext:
            new_text = '.'.join((new_text, ext))
        return new_text
Example no. 5
File: util.py Project: dnouri/Kotti
def title_to_name(title):
    request = get_current_request()
    if request is not None:
        locale_name = get_locale_name(request)
    else:
        locale_name = 'en'
    return unicode(urlnormalizer.normalize(title, locale_name, max_length=40))
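A hedged usage sketch, assuming the plone.i18n normalizer turns 'My First Page!' into 'my-first-page' for the 'en' locale:

name = title_to_name(u'My First Page!')
# name would be roughly u'my-first-page', truncated to at most 40 characters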
Example no. 6
 def css_class(self):
     """ Widget specific css class
     """
     css_type = self.widget_type
     css_title = normalizer.normalize(self.data.title)
     return 'faceted-{0}-widget section-{1}{2}'.format(
         css_type, css_title, self.custom_css)
Example no. 7
File: util.py Project: dnouri/Kotti
def title_to_name(title):
    request = get_current_request()
    if request is not None:
        locale_name = get_locale_name(request)
    else:
        locale_name = "en"
    return unicode(urlnormalizer.normalize(title, locale_name, max_length=40))
Example no. 8
def title_to_name(title, blacklist=()):
    request = get_current_request()
    if request is not None:
        locale_name = get_locale_name(request)
    else:
        locale_name = 'en'
    name = unicode(urlnormalizer.normalize(title, locale_name, max_length=40))
    while name in blacklist:
        name = disambiguate_name(name)
    return name
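An illustrative call of the blacklist variant; the exact suffix scheme depends on Kotti's disambiguate_name, so the result shown here is only an assumption:

taken = (u'my-page', u'my-page-1')
name = title_to_name(u'My Page', blacklist=taken)
# disambiguate_name is applied until the name is free, e.g. u'my-page-2'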
Example no. 9
def title_to_name(title, blacklist=()):
    request = get_current_request()
    if request is not None:
        locale_name = get_locale_name(request)
    else:
        locale_name = 'en'
    name = unicode(urlnormalizer.normalize(title, locale_name, max_length=40))
    while name in blacklist:
        name = disambiguate_name(name)
    return name
Example no. 10
 def __iter__(self):
     for item in self.previous:
         sourcekey = self.sourcekey(*item.keys())[0]
         if not sourcekey:  # not enough info to return a sensible id key
             yield item
             continue

         # Get the information we require for normalization
         keywords = dict(text=item[sourcekey], locale=self.locale(item))
         # Perform Normalization
         source_norm = normalizer.normalize(**keywords)
         item[self.destinationkey(item)] = source_norm

         yield item
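As a sketch of what this blueprint does to a pipeline item, assuming sourcekey resolves to 'title' and destinationkey to '_id' (both names are hypothetical here):

# Hypothetical item before:  {'title': u'Caffè & Co.'}
# Hypothetical item after:   {'title': u'Caffè & Co.', '_id': u'caffe-co'}
# (the normalized value is simply copied under the destination key)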
Example no. 11
def data_recovery(filename, image, container, status):
    csv_file = open(filename, "rb")
    reader = csv.reader(csv_file, delimiter=";")
    reader.next()
    for line in reader:
        project_id = line[0]
        project_title = line[2]
        project_theme = line[1]
        project_body = line[18]
        project_author = line[3]
        project_mail = line[4]
        project_like = line[15]
        project_unlike = line[16]

        token_type = token_type_recovery(project_theme)

        if len(project_author) < 3:
            project_author = urlnormalizer.normalize(
                project_mail[0:3].decode("utf8"), locale="fr"
            )
        else:
            project_author = urlnormalizer.normalize(
                project_author.decode("utf8"), locale="fr"
            )

        add_project(
            container,
            project_id,
            project_title,
            token_type,
            project_body,
            image,
            project_author,
            project_like,
            project_unlike,
            status,
        )
        print(project_title)
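For reference, the ';'-separated column layout this loop assumes (only the indices actually read are listed; the labels mirror the variable names above):

# line[0]  -> project_id
# line[1]  -> project_theme
# line[2]  -> project_title
# line[3]  -> project_author
# line[4]  -> project_mail
# line[15] -> project_like
# line[16] -> project_unlike
# line[18] -> project_body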
Example no. 12
    def __iter__(self):
        for item in self.previous:
            sourcekey = self.sourcekey(*item.keys())[0]
            if not sourcekey:  # not enough info to return a sensible id key
                yield item
                continue

            # Get the information we require for normalization
            keywords = dict(text=item[sourcekey], locale=self.locale(item))
            # Perform Normalization
            source_norm = normalizer.normalize(**keywords)
            item[self.destinationkey(item)] = source_norm

            yield item
Example no. 13
def data_recovery(filename, image, container, status):
    csv_file = open(filename, "rb")
    reader = csv.reader(csv_file, delimiter=";")
    reader.next()
    for line in reader:
        project_id = line[0]
        project_title = line[2]
        project_theme = line[1]
        project_body = line[18]
        project_author = line[3]
        project_mail = line[4]
        project_like = line[15]
        project_unlike = line[16]

        token_type = token_type_recovery(project_theme)

        if len(project_author) < 3:
            project_author = urlnormalizer.normalize(
                project_mail[0:3].decode("utf8"), locale="fr")
        else:
            project_author = urlnormalizer.normalize(
                project_author.decode("utf8"), locale="fr")

        add_project(
            container,
            project_id,
            project_title,
            token_type,
            project_body,
            image,
            project_author,
            project_like,
            project_unlike,
            status,
        )
        print(project_title)
Example no. 14
    def __iter__(self):
        for item in self.previous:
            elem = item["elem"]
            item["link"] = elem.findtext("link", default="")
            item["title"] = elem.findtext("title", default="")
            normalized_title = normalizer.normalize(item["title"])

            wpid = elem.findtext(".//{%s}post_name" % xmlns_wp, default="")
            #            item['id'] = wpid and wpid or normalized_title
            item["_id"] = str(normalized_title)

            wptype = elem.findtext(".//{%s}post_type" % xmlns_wp, default="")
            item["_type"] = typesMap.has_key(wptype) and typesMap[wptype] or typesMap["default"]

            wpstatus = elem.findtext(".//{%s}status" % xmlns_wp, default="")
            item["_transitions"] = workflowMap.has_key(wpstatus) and workflowMap[wpstatus] or workflowMap["default"]
            item["subject"] = [x.text for x in elem.findall("category")]
            creation_date = elem.findtext(".//{%s}post_date_gmt" % xmlns_wp, default="")

            pubDate = elem.findtext("pubDate", default="")
            item["effectiveDate"] = DateTime(pubDate)

            if creation_date and wptype == "post":
                dt = DateTime(creation_date)
                item["creation_date"] = dt
                item["_path"] = "%(year)s/%(month)s/%(day)s/%(title)s" % {
                    "year": dt.year(),
                    "month": dt.mm(),
                    "day": dt.dd(),
                    "title": normalized_title,
                }
            else:
                item["_path"] = normalized_title

            item["_path"] = "/".join((self.base_path, item["_path"]))
            item["_path"] = str(item["_path"])
            item["description"] = elem.findtext(".//{%s}encoded" % xmlns_excerpt, default="")
            item["creators"] = elem.findtext(".//{%s}creator" % xmlns_dc, default="")
            item["text"] = elem.findtext(".//{%s}encoded" % xmlns_content, default="")
            item["attachment_url"] = elem.findtext(".//{%s}attachment_url" % xmlns_wp, default="")

            yield item
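To illustrate the resulting _path, assuming base_path is '/blog' and a WordPress 'post' dated 2012-03-05 whose title normalizes to 'hello-world':

# posts with a creation date:  /blog/2012/03/05/hello-world
# anything else:               /blog/hello-world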
Example no. 15
 def css_class(self):
     """ Widget specific css class
     """
     css_type = self.widget_type
     css_title = normalizer.normalize(self.data.title)
     return ("faceted-checkboxes-widget " "faceted-{0}-widget section-{1}").format(css_type, css_title)
Example no. 16
 def css_class(self):
     """ Widget specific css class
     """
     css_type = self.widget_type #_css
     css_title = normalizer.normalize(self.data.title)
     return 'faceted-{0}-widget section-{1}'.format(css_type, css_title)