Example #1
0
  def _get_url(self, action, repos_rev, change):
    """Build the "diff_<action>_url" from the configuration module.

    Works on a copy of self.params so that the URL-specific keys are
    visible to the configuration lookup without leaking into the shared
    parameter dict (missing keys there would raise KeyError).
    """
    url_params = self.params.copy()
    url_params['path'] = (
        urllib_parse_quote(change.path) if change.path else None)
    url_params['base_path'] = (
        urllib_parse_quote(change.base_path) if change.base_path else None)
    url_params['rev'] = repos_rev
    url_params['base_rev'] = change.base_rev
    return self.cfg.get("diff_%s_url" % action, self.group, url_params)
  def canonical_name(self):
    """Return a canonical, filename-safe name for this contributor.

    Preference order: username, then a shortened form of the email
    address, then the whitespace-stripped lower-cased real name.

    The result is guaranteed stable only while no further contributions
    are recorded from this contributor, because a new contribution may
    bring a new form of the name, which affects this algorithm.
    """
    name = None
    if self.username:
      name = self.username
    elif self.email:
      # Shorten the address to everything before the first dot that
      # follows the '@', to keep names manageable.  Fall back to the
      # full address when that structure is absent (no '@' or no dot
      # after it).  If collisions ever appear, the full address can
      # always be used instead.
      try:
        dot_posn = self.email.index('.', self.email.index('@'))
        name = self.email[:dot_posn]
      except ValueError:
        name = self.email
    elif self.real_name:
      # Last resort: squash the real name into one lower-case token.
      name = ''.join(self.real_name.lower().split(' '))
    if name is None:
      complain('Unable to construct a canonical name for Contributor.', True)
    return urllib_parse_quote(name, safe="!#$&'()+,;<=>@[]^`{}~")
Example #3
0
    def __post_process(self):
        """Fill in defaults and derived fields after parsing.

        Ensures `date`/`updated` exist and are timezone-aware, applies
        title/slug/categories fallbacks, computes the guid, and builds
        the permalink when automatic permalinks are enabled.
        """
        # Fill in empty default values.
        if not self.date:
            self.date = datetime.datetime.now(pytz.timezone(self.__timezone))
        if not self.updated:
            self.updated = self.date

        # Make sure dates have timezone info.  pytz's localize() returns
        # a NEW aware datetime instead of mutating its argument; the
        # original code discarded that result, leaving the dates naive.
        if not self.date.tzinfo:
            self.date = pytz.timezone(self.__timezone).localize(self.date)
        if not self.updated.tzinfo:
            self.updated = pytz.timezone(self.__timezone).localize(
                self.updated)

        if not self.title:
            self.title = "Untitled - {0}".format(self.date)
        if not self.slug:
            if config.slugify:
                # The user has provided their own slugify function; use it.
                self.slug = config.slugify(self)
            else:
                self.slug = create_slug(self.title)

        if not self.categories or len(self.categories) == 0:
            self.categories = set([Category('uncategorized')])
        if self.guid:
            # Used for expanding :uuid in the permalink template below.
            uuid = urllib_parse_quote(self.guid)
        else:
            self.guid = uuid = create_guid(self.title, self.date)
        if not self.permalink and \
                blog_config.auto_permalink.enabled:
            self.permalink = create_permalink(
                blog_config.auto_permalink.path, bf.config.site.url,
                blog_config.path, self.title, self.date, uuid, self.filename)

        logger.debug("Permalink: {0}".format(self.permalink))
def drop(revision_url_pattern):
  """Write the contributor index (index.html) and one detail page per
  qualifying contributor under the "detail" subdirectory.

  Full committers are omitted; every other contributor with a positive
  score gets a page.  `revision_url_pattern` is forwarded to
  Contributor.html_out() for building revision links.
  """
  # The data structures are all linked up nicely to one another: each
  # LogMessage contains all the Contributors involved with that commit,
  # and each Contributor points back to all the LogMessages it
  # contributed to.  The HTML output does not yet take full advantage of
  # that cross-linking: we just emit a per-contributor revision list and
  # a master index linking to those files.

  detail_subdir = "detail"
  if not os.path.exists(detail_subdir):
    os.mkdir(detail_subdir)

  # 'with' guarantees index.html is closed even if a write fails.
  with open('index.html', 'w') as index:
    index.write(html_header('Contributors as of r%d' % LogMessage.max_revnum))
    index.write(index_introduction)
    index.write('<ol>\n')
    # The same contributor appears under multiple keys, so uniquify.
    seen_contributors = set()
    # Sorting alphabetically is acceptable, but even better would be to
    # sort by number of contributions, so the most active people appear
    # at the top -- that way we know whom to look at first for commit
    # access proposals.
    sorted_contributors = sorted(Contributor.all_contributors.values())
    for c in sorted_contributors:
      if c not in seen_contributors:
        # Don't even bother to print out full committers.  They are a
        # distraction from the purposes for which we're here.
        if c.score() > 0 and not c.is_full_committer:
          committerness = ''
          if c.is_committer:
            committerness = '&nbsp;(partial&nbsp;committer)'
          urlpath = "%s/%s.html" % (detail_subdir, c.canonical_name())
          fname = os.path.join(detail_subdir, "%s.html" % c.canonical_name())
          index.write('<li><p><a href="%s">%s</a>&nbsp;[%s]%s</p></li>\n'
                      % (urllib_parse_quote(urlpath),
                         c.big_name(html=True),
                         c.score_str(), committerness))
          c.html_out(revision_url_pattern, fname)
        seen_contributors.add(c)
    index.write('</ol>\n')
    index.write(html_footer())
Example #5
0
 def __post_process(self):
     """Fill in defaults (title, slug, categories), compute the guid,
     and build the permalink if automatic permalinks are enabled."""
     if not self.title:
         self.title = "Untitled - {0}".format(self.date)
     if not self.slug:
         self.slug = create_slug(self.title)
     if not self.categories or len(self.categories) == 0:
         self.categories = set([Category('uncategorized')])
     if self.guid:
         # Quoted guid feeds :uuid expansion in the permalink template.
         guid_token = urllib_parse_quote(self.guid)
     else:
         self.guid = guid_token = create_guid(self.title, self.date)
     if not self.permalink and \
             blog_config.auto_permalink.enabled:
         self.permalink = create_permalink(
             blog_config.auto_permalink.path, bf.config.site.url,
             blog_config.path, self.title, self.date, guid_token,
             self.filename)
     logger.debug("Permalink: {0}".format(self.permalink))
Example #6
0
 def __post_process(self):
     """Apply fallback values for title, slug and categories, derive
     the guid, and build the permalink when auto permalinks are on."""
     self.title = self.title or "Untitled - {0}".format(self.date)
     self.slug = self.slug or create_slug(self.title)
     if not self.categories or len(self.categories) == 0:
         self.categories = set([Category('uncategorized')])
     if not self.guid:
         # No guid yet: derive one from the title and date.
         self.guid = uuid = create_guid(self.title, self.date)
     else:
         # Quoted form is used for expanding :uuid in the permalink
         # template code below.
         uuid = urllib_parse_quote(self.guid)
     if not self.permalink and \
             blog_config.auto_permalink.enabled:
         self.permalink = create_permalink(blog_config.auto_permalink.path,
                                           bf.config.site.url,
                                           blog_config.path,
                                           self.title,
                                           self.date,
                                           uuid,
                                           self.filename)
     logger.debug("Permalink: {0}".format(self.permalink))
Example #7
0
    def __post_process(self):
        """Fill in defaults and derived fields after parsing.

        Guarantees `date`/`updated` exist and carry timezone info,
        applies title/slug/categories fallbacks, computes the guid, and
        builds the permalink when automatic permalinks are enabled.
        """
        # Fill in empty default values.
        if not self.date:
            self.date = datetime.datetime.now(pytz.timezone(self.__timezone))
        if not self.updated:
            self.updated = self.date

        # Make sure dates have timezone info.  pytz's localize() returns
        # a NEW aware datetime instead of mutating its argument; the
        # original code discarded that result, leaving the dates naive.
        if not self.date.tzinfo:
            self.date = pytz.timezone(self.__timezone).localize(self.date)
        if not self.updated.tzinfo:
            self.updated = pytz.timezone(self.__timezone).localize(
                self.updated)

        if not self.title:
            self.title = "Untitled - {0}".format(self.date)
        if not self.slug:
            if config.slugify:
                # The user has provided their own slugify function; use it.
                self.slug = config.slugify(self)
            else:
                self.slug = create_slug(self.title)

        if not self.categories or len(self.categories) == 0:
            self.categories = set([Category('uncategorized')])
        if self.guid:
            # Used for expanding :uuid in the permalink template below.
            uuid = urllib_parse_quote(self.guid)
        else:
            self.guid = uuid = create_guid(self.title, self.date)
        if not self.permalink and \
                blog_config.auto_permalink.enabled:
            self.permalink = create_permalink(blog_config.auto_permalink.path,
                                              bf.config.site.url,
                                              blog_config.path, self.title,
                                              self.date, uuid, self.filename)

        logger.debug("Permalink: {0}".format(self.permalink))
 def uri_encode(s):
     """Percent-encode `s` for use in a URI.

     Unlike urllib.parse.quote's default, ':', '&' and '@' are left
     literal, because svn does not encode them.
     """
     svn_safe_chars = '/:&@'
     return urllib_parse_quote(s, safe=svn_safe_chars)
Example #9
0
    def __init__(self):
        """Load the blog configuration and build every in-memory
        structure: entries, per-archive and per-category buckets, the
        chapters index, blog archives, and (optionally) JSON-LD docs.

        NOTE(review): relies on many project helpers (notify,
        yield_entries_content, build_categories_tree, quirk_encoding,
        ...) defined elsewhere in the package.
        """
        notify("┌─ " + messages.loading_data)
        self.root_page = None
        self.blog_configuration = get_blog_configuration()
        self.sort_by = self.blog_configuration["sort_by"]
        self.enable_jsonld = self.blog_configuration["enable_jsonld"]
        self.enable_jsonp = self.blog_configuration["enable_jsonp"]
        self.blog_url = self.blog_configuration["blog_url"]
        self.path_encoding = self.blog_configuration["path_encoding"]
        # Thread names arrive as one comma-separated string.
        self.disable_threads = [
            thread_name.strip() for thread_name in
            self.blog_configuration["disable_threads"].split(',')
        ]
        self.entries = list()
        self.entries_per_archives = list()
        self.entries_per_categories = list()

        # One requested-entry slot per CPU; cpu_count() may raise
        # NotImplementedError on some platforms, in which case fall
        # back to a single slot.
        try:
            self.cpu_threads_requested_entry = [None] * cpu_count()

        except NotImplementedError:
            self.cpu_threads_requested_entry = [None]

        self.max_category_weight = 1
        self.categories_leaves = []
        self.embed_providers = {}
        self.html_categories_tree = {}
        self.html_categories_leaves = {}
        self.html_blog_archives = {}
        self.cache_get_entry_attribute_by_id = {}
        self.cache_get_chapter_attribute_by_index = {}
        self.generation_timestamp = datetime.datetime.now()
        self.raw_chapters = {}
        self.chapters_index = []
        self.html_chapters = {}

        # Build JSON-LD doc if any
        if self.enable_jsonld or self.enable_jsonp:
            # Optional schema.org metadata lives under the literal key
            # "https://schema.org" in the configuration.
            if "https://schema.org" in self.blog_configuration.keys():
                self.optionals_schemadotorg = self.blog_configuration[
                    "https://schema.org"]

            else:
                self.optionals_schemadotorg = {}

            self.entries_as_jsonld = {}
            self.archives_as_jsonld = {}
            self.categories_as_jsonld = {}
            self.root_site_to_jsonld()

        # Build entries
        try:
            jsonld_callback = self.entry_to_jsonld_callback if (
                self.enable_jsonld or self.enable_jsonp) else None
            for filename in yield_entries_content():
                self.entries.append(
                    Entry(
                        filename, self.blog_configuration["path"],
                        jsonld_callback, self.blog_configuration["path"]
                        ["archives_directory_name"], self.path_encoding))

        # Might happen during Entry creation.
        except MalformedPatterns as e:
            from venc2.helpers import handle_malformed_patterns
            handle_malformed_patterns(e)

        self.entries = sorted(self.entries, key=lambda entry: self.sort(entry))

        path_categories_sub_folders = self.blog_configuration["path"][
            "categories_sub_folders"] + '/'
        path_archives_directory_name = self.blog_configuration["path"][
            "archives_directory_name"]

        # Link entries into a doubly linked list and bucket them per
        # archive date and per category.
        for entry_index in range(0, len(self.entries)):
            current_entry = self.entries[entry_index]
            if entry_index > 0:
                self.entries[entry_index - 1].next_entry = current_entry
                current_entry.previous_entry = self.entries[entry_index - 1]

            # Update entriesPerDates
            if path_archives_directory_name != '':
                formatted_date = current_entry.formatted_date
                entries_index = self.get_entries_index_for_given_date(
                    formatted_date)
                if entries_index != None:
                    self.entries_per_archives[entries_index].count += 1
                    self.entries_per_archives[entries_index].related_to.append(
                        entry_index)

                else:
                    self.entries_per_archives.append(
                        MetadataNode(formatted_date, entry_index))

            # Update entriesPerCategories
            try:
                if self.path_encoding == '':
                    sub_folders = quirk_encoding(
                        unidecode.unidecode(path_categories_sub_folders))
                else:
                    sub_folders = urllib_parse_quote(
                        path_categories_sub_folders,
                        encoding=self.path_encoding)

            except UnicodeEncodeError as e:
                notify("\"{0}\": ".format(path_categories_sub_folders) +
                       str(e),
                       color="YELLOW")

            # NOTE(review): if the encoding above raised
            # UnicodeEncodeError on the first iteration, sub_folders is
            # unbound here and the next line raises NameError — confirm
            # whether notify() was meant to be fatal.
            sub_folders = sub_folders if sub_folders != '/' else ''
            build_categories_tree(entry_index,
                                  current_entry.raw_categories,
                                  self.entries_per_categories,
                                  self.categories_leaves,
                                  self.max_category_weight,
                                  self.set_max_category_weight,
                                  encoding=self.path_encoding,
                                  sub_folders=sub_folders)
            self.update_chapters(current_entry)

        # build chapters index
        path_chapters_sub_folders = self.blog_configuration["path"][
            "chapters_sub_folders"]
        path_chapter_folder_name = self.blog_configuration["path"][
            "chapter_directory_name"]
        #TODO: Might be not safe, must test level if is actually an int. Test as well the whole sequence.

        # Dotted chapter indices (e.g. "1.2.3") are ordered numerically
        # after stripping the dots — presumably single-digit levels;
        # TODO confirm (see the TODO above).
        for chapter in sorted(self.raw_chapters.keys(),
                              key=lambda x: int(x.replace('.', ''))):
            top = self.chapters_index
            index = ''
            levels = [
                str(level) for level in chapter.split('.') if level != ''
            ]
            len_levels = len(levels)

            # Walk (and extend) the chapter tree one dotted level at a
            # time, accumulating the partial index in `index`.
            for i in range(0, len_levels):
                l = levels[i]
                if index == '':
                    index = l

                else:
                    index += '.' + l

                # Look for an existing node holding this partial index.
                f = filter(lambda c: c.index == index, top)
                try:
                    top = next(f).sub_chapters

                except StopIteration:
                    if index in self.raw_chapters.keys():
                        # TODO: Replace this shitty bloc by a function call building path
                        try:
                            # '\x1a' appears to be a placeholder prefix
                            # substituted later during path handling —
                            # TODO confirm.
                            path = "\x1a" + (
                                (path_chapters_sub_folders + '/'
                                 if path_chapters_sub_folders != '' else '') +
                                path_chapter_folder_name).format(
                                    **{
                                        "chapter_name":
                                        self.raw_chapters[index].title,
                                        "chapter_index": index
                                    })
                            try:
                                if self.path_encoding == '':
                                    path = quirk_encoding(
                                        unidecode.unidecode(path))

                                else:
                                    path = urllib_parse_quote(
                                        path, encoding=self.path_encoding)

                            except UnicodeEncodeError as e:
                                notify("\"{0}\": ".format(
                                    path_chapters_sub_folders) + str(e),
                                       color="YELLOW")

                        except KeyError as e:
                            from venc2.helpers import die
                            die(messages.variable_error_in_filename.format(e))

                        top.append(
                            Chapter(index, self.raw_chapters[index], path))
                        self.raw_chapters[index].chapter = top[-1]

                    else:
                        # Intermediate level with no raw chapter yet:
                        # insert an empty placeholder node and descend.
                        top.append(Chapter(index, None, ''))
                        top = top[-1].sub_chapters

        # Setup BlogArchives Data
        self.blog_archives = list()

        path_archives_sub_folders = self.blog_configuration["path"][
            "archives_sub_folders"] + '/'
        for node in self.entries_per_archives:
            try:
                if self.path_encoding == '':
                    sub_folders = quirk_encoding(
                        unidecode.unidecode(path_archives_sub_folders))
                else:
                    sub_folders = urllib_parse_quote(
                        path_archives_sub_folders, encoding=self.path_encoding)

            except UnicodeEncodeError as e:
                notify("\"{0}\": ".format(path_archives_sub_folders) + str(e),
                       color="YELLOW")

            # NOTE(review): same potential NameError as above if the
            # first encoding attempt fails.
            sub_folders = sub_folders if sub_folders != '/' else ''

            self.blog_archives.append({
                "value": node.value,
                "path": "\x1a" + sub_folders + node.value,
                "count": node.count,
                "weight": node.weight
            })
 def uri_encode(s):
     """Quote `s` for a URI, keeping '/' plus ':', '&' and '@' literal.

     svn leaves :&@ unencoded, whereas urllib.parse.quote would encode
     them by default, so they are added to the safe set here.
     """
     return urllib_parse_quote(s, safe='/' + ':&@')
Example #11
0
def drop(revision_url_pattern):
    """Write the contributor index (index.html) and one detail page per
    qualifying contributor under the "detail" subdirectory.

    Contributors are listed most-active first; full committers are
    omitted entirely.  `revision_url_pattern` is forwarded to
    Contributor.html_out() for building revision links.
    """
    # The data structures are all linked up nicely to one another: each
    # LogMessage contains all the Contributors involved with that
    # commit, and each Contributor points back to all the LogMessages
    # it contributed to.  The HTML output does not yet take full
    # advantage of that cross-linking: we just emit a per-contributor
    # revision list and a master index linking to those files.

    detail_subdir = "detail"
    if not os.path.exists(detail_subdir):
        os.mkdir(detail_subdir)

    # 'with' guarantees index.html is closed even if a write fails.
    with open('index.html', 'w') as index:
        index.write(html_header('Contributors as of r%d' %
                                LogMessage.max_revnum))
        index.write(index_introduction)
        index.write('<ol>\n')
        # The same contributor appears under multiple keys, so uniquify.
        seen_contributors = set()
        # Most active contributors first, so we know whom to look at
        # first for commit access proposals.
        sorted_contributors = sorted(Contributor.all_contributors.values(),
                                     key=Contributor.sort_key,
                                     reverse=True)
        for c in sorted_contributors:
            if c not in seen_contributors:
                # Don't even bother to print out full committers.  They
                # are a distraction from the purposes for which we're
                # here.
                if c.score() > 0 and not c.is_full_committer:
                    committerness = ''
                    if c.is_committer:
                        committerness = '&nbsp;(partial&nbsp;committer)'
                    urlpath = "%s/%s.html" % (detail_subdir,
                                              c.canonical_name())
                    fname = os.path.join(detail_subdir,
                                         "%s.html" % c.canonical_name())
                    index.write(
                        '<li><p><a href="%s">%s</a>&nbsp;[%s]%s</p></li>\n' %
                        (urllib_parse_quote(urlpath), c.big_name(html=True),
                         c.score_str(), committerness))
                    c.html_out(revision_url_pattern, fname)
                seen_contributors.add(c)
        index.write('</ol>\n')
        index.write(html_footer())