def category_to_tags(command, argv):
    """Goes through all entries and converts the category to tags
    metadata.

    It adds the tags line as the second line.

    It maintains the mtime for the file.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom import pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback(
        "entryparser", {"txt": pyblosxom.blosxom_entry_parser},
        mappingfunc=lambda x, y: y,
        defaultfunc=lambda x: x)

    req = pyblosxom.Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)

    if not datadir.endswith(os.sep):
        datadir = datadir + os.sep

    for mem in filelist:
        print "working on %s..." % mem

        category = os.path.dirname(mem)[len(datadir):]
        tags = category.split(os.sep)
        print "   adding tags %s" % tags
        tags = "#tags %s\n" % (sep.join(tags))

        atime, mtime = os.stat(mem)[7:9]

        fp = open(mem, "r")
        data = fp.readlines()
        fp.close()

        data.insert(1, tags)

        fp = open(mem, "w")
        fp.write("".join(data))
        fp.close()

        os.utime(mem, (atime, mtime))

    return 0
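
As a quick illustration of the conversion above, here is a minimal sketch; the datadir, the entry path, and the entry body are hypothetical:

# Minimal sketch of the category-to-tags conversion above.  The datadir, the
# entry path, and the entry body are hypothetical.
import os

datadir = "/home/blog/entries" + os.sep                       # hypothetical
entry = os.path.join("/home/blog/entries", "dev", "python", "first-post.txt")

category = os.path.dirname(entry)[len(datadir):]              # "dev/python"
tags_line = "#tags %s\n" % ",".join(category.split(os.sep))   # "#tags dev,python"

lines = ["My first post\n", "<p>Body text...</p>\n"]          # hypothetical entry
lines.insert(1, tags_line)                                    # second line, as documented
print("".join(lines))
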
Example #2
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        baseurl = config.get("base_url", "")

        archives = {}
        archive_list = tools.walk(self._request, root)
        items = []

        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)

            y = time.strftime("%Y", timetuple)
            m = time.strftime("%m", timetuple)
            d = time.strftime("%d", timetuple)
            l = "<a href=\"%s/%s/\">%s</a><br>" % (baseurl, y, y)

            if not archives.has_key(y):
                archives[y] = l
            items.append([
                "%s-%s" % (y, m),
                "%s-%s-%s" % (y, m, d),
                time.mktime(timetuple), mem
            ])

        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()

        result = []
        for key in arc_keys:
            result.append(archives[key])
        self._archives = '\n'.join(result)
        self._items = items
Example #3
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        archives = {}
        archive_list = tools.walk(self._request, root)
        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)

        template = config.get('archive_template',
                    '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')
        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)
            timedict = {}
            for x in ["B", "b", "m", "Y", "y"]:
                timedict[x] = time.strftime("%" + x, timetuple)

            fulldict.update(timedict)
            if not (timedict['Y'] + timedict['m']) in archives:
                archives[timedict['Y'] + timedict['m']] = (template % fulldict)

        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()
        result = []
        for key in arc_keys:
            result.append(archives[key])
        self._archives = '\n'.join(result)
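
A hedged sketch of the archive_template substitution used above; the base_url and the entry date are invented, and the rendered month abbreviation depends on the locale:

# Sketch of the archive_template substitution above; base_url and the date are
# invented, and the %b abbreviation depends on the locale.
import time

fulldict = {"base_url": "http://example.com/blog"}            # hypothetical
timetuple = time.localtime(time.mktime((2011, 7, 4, 0, 0, 0, 0, 0, -1)))
for x in ["B", "b", "m", "Y", "y"]:
    fulldict[x] = time.strftime("%" + x, timetuple)

template = '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />'
print(template % fulldict)
# -> <a href="http://example.com/blog/2011/Jul">2011-Jul</a><br />  (C locale)
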
Example #4
    def keys(self):
        """
        Returns a list of the keys found in this entrypickle instance.
        This corresponds to the list of entries that are cached.

        @returns: list of full paths to entries that are cached
        @rtype: list of strings
        """
        import re
        keys = []
        cached = []
        if os.path.isdir(self._config):
            cached = tools.walk(self._request, self._config, 1,
                                re.compile(r'.*\.entrypickle$'))
        for cache in cached:
            cache_data = pickle.load(open(cache))
            key = cache_data.get('realfilename', '')
            if not key and os.path.isfile(cache):
                os.remove(cache)
            self.load(key)
            if not self.isCached():
                self.rmEntry()
            else:
                keys.append(key)
        return keys
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        baseurl = config.get("base_url", "")

        archives = {}
        archive_list = tools.walk(self._request, root)
        items = []

        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)

            y = time.strftime("%Y", timetuple)
            m = time.strftime("%m", timetuple)
            d = time.strftime("%d", timetuple)
            l = "<a href=\"%s/%s/\">%s</a><br>" % (baseurl, y, y)

            if not archives.has_key(y):
                archives[y] = l
            items.append(["%s-%s" % (y, m), "%s-%s-%s" % (y, m, d),
                          time.mktime(timetuple), mem])

        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()

        result = []
        for key in arc_keys:
            result.append(archives[key])
        self._archives = '\n'.join(result)
        self._items = items
Example #6
    def keys(self):
        """
        Returns a list of the keys found in this entrypickle instance.
        This corresponds to the list of entries that are cached.

        @returns: list of full paths to entries that are cached
        @rtype: list of strings
        """
        import re
        keys = []
        cached = []
        if os.path.isdir(self._config):
            cached = tools.walk(self._request,
                                self._config,
                                1,
                                re.compile(r'.*\.entrypickle$'))
        for cache in cached:
            cache_data = pickle.load(open(cache))
            key = cache_data.get('realfilename', '')
            if not key and os.path.isfile(cache):
                os.remove(cache)
            self.load(key)
            if not self.isCached():
                self.rmEntry()
            else:
                keys.append(key)
        return keys
Example #7
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        archives = {}
        archive_list = tools.walk(self._request, root)
        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)

        template = config.get(
            'archive_template',
            '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')
        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)
            timedict = {}
            for x in ["B", "b", "m", "Y", "y"]:
                timedict[x] = time.strftime("%" + x, timetuple)

            fulldict.update(timedict)
            if not (timedict['Y'] + timedict['m']) in archives:
                archives[timedict['Y'] + timedict['m']] = (template % fulldict)

        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()
        result = []
        for key in arc_keys:
            result.append(archives[key])
        self._archives = '\n'.join(result)
Example #8
def category_to_tags(command, argv):
    """Goes through all entries and converts the category to tags
    metadata.

    It adds the tags line as the second line.

    It maintains the mtime for the file.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")

    from Pyblosxom.pyblosxom import Request
    from Pyblosxom.blosxom import blosxom_entry_parser
    from Pyblosxom import tools

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback("entryparser",
                                            {"txt": blosxom_entry_parser},
                                            mappingfunc=lambda x, y: y,
                                            defaultfunc=lambda x: x)

    req = Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)

    if not datadir.endswith(os.sep):
        datadir = datadir + os.sep

    for mem in filelist:
        print "working on %s..." % mem

        category = os.path.dirname(mem)[len(datadir):]
        tags = category.split(os.sep)
        print "   adding tags %s" % tags
        tags = "#tags %s\n" % (sep.join(tags))

        atime, mtime = os.stat(mem)[7:9]

        fp = open(mem, "r")
        data = fp.readlines()
        fp.close()

        data.insert(1, tags)

        fp = open(mem, "w")
        fp.write("".join(data))
        fp.close()

        os.utime(mem, (atime, mtime))

    return 0
Example #9
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.  It takes the
    request object in and figures out which entries based on the
    default behavior that we want to show and generates a list of
    EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    if data['bl_type'] == 'dir':
        filelist = tools.walk(request,
                              data['root_datadir'],
                              int(config.get("depth", "0")))
    elif data['bl_type'] == 'file':
        filelist = [data['root_datadir']]
    else:
        filelist = []

    entrylist = [FileEntry(request, e, data["root_datadir"]) for e in filelist]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr", ""):
        tmp_pi_mo = data.get("pi_mo", "")
        datestr = "%s%s%s" % (data.get("pi_yr", ""),
                              tools.month2num.get(tmp_pi_mo, tmp_pi_mo),
                              data.get("pi_da", ""))
        entrylist = [x for x in entrylist
                     if time.strftime("%Y%m%d%H%M%S", x["timetuple"]).startswith(datestr)]

    ids = [e.get_id() for e in entrylist]
    dupes = []
    for e in entrylist:
        if (e._realfilename != e._filename) and (e._realfilename in ids):
            dupes.append(e)
    for e in dupes:
        entrylist.remove(e)

    args = {"request": request, "entry_list": entrylist}
    entrylist = tools.run_callback("sortlist",
                                   args,
                                   donefunc=lambda x: x != None,
                                   defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entrylist}    
    entrylist = tools.run_callback("truncatelist",
                                   args,
                                   donefunc=lambda x: x != None,
                                   defaultfunc=blosxom_truncate_list_handler)

    return entrylist
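
A small sketch of the archive filter above, with hypothetical request values; it shows how the datestr prefix match keeps only entries from the requested period (month2num here stands in for tools.month2num):

# Sketch of the pi_yr/pi_mo/pi_da prefix filter above; the request values and
# the timetuples are made up.
import time

month2num = {"Jul": "07"}                      # stand-in for tools.month2num
pi_yr, pi_mo, pi_da = "2011", "Jul", ""

datestr = "%s%s%s" % (pi_yr, month2num.get(pi_mo, pi_mo), pi_da)    # "201107"

timetuples = [time.localtime(time.mktime((2011, 7, 4, 12, 0, 0, 0, 0, -1))),
              time.localtime(time.mktime((2010, 1, 1, 0, 0, 0, 0, 0, -1)))]
kept = [t for t in timetuples
        if time.strftime("%Y%m%d%H%M%S", t).startswith(datestr)]
print(len(kept))   # 1 -- only the July 2011 entry matches the prefix
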
Example #10
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.  It takes the
    request object in and figures out which entries based on the
    default behavior that we want to show and generates a list of
    EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    if data['bl_type'] == 'dir':
        file_list = tools.walk(request, data['root_datadir'],
                               int(config.get("depth", "0")))
    elif data['bl_type'] == 'file':
        file_list = [data['root_datadir']]
    else:
        file_list = []

    entry_list = [
        FileEntry(request, e, data["root_datadir"]) for e in file_list
    ]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr", ""):
        tmp_pi_mo = data.get("pi_mo", "")
        date_str = "%s%s%s" % (data.get(
            "pi_yr", ""), tools.month2num.get(
                tmp_pi_mo, tmp_pi_mo), data.get("pi_da", ""))
        entry_list = [
            x for x in entry_list if time.strftime(
                "%Y%m%d%H%M%S", x["timetuple"]).startswith(date_str)
        ]

    args = {"request": request, "entry_list": entry_list}
    entry_list = tools.run_callback("sortlist",
                                    args,
                                    donefunc=lambda x: x != None,
                                    defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entry_list}
    entry_list = tools.run_callback("truncatelist",
                                    args,
                                    donefunc=lambda x: x != None,
                                    defaultfunc=blosxom_truncate_list_handler)

    return entry_list
Example #11
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        baseurl = config.get("base_url", "")

        archives = {}
        archive_list = tools.walk(self._request, root)
        items = []

        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)

        flavour = data.get(
            "flavour", config.get("default_flavour", "html"))

        template = config.get(
            'archive_template',
            '<a href="%(base_url)s/%(Y)s/index.%(f)s">%(Y)s</a><br />')

        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)

            timedict = {}
            for x in ["m", "Y", "y", "d"]:
                timedict[x] = time.strftime("%" + x, timetuple)

            fulldict.update(timedict)
            fulldict["f"] = flavour
            year = fulldict["Y"]

            if not year in archives:
                archives[year] = template % fulldict
            items.append(
                ["%(Y)s-%(m)s" % fulldict,
                 "%(Y)s-%(m)s-%(d)s" % fulldict,
                 time.mktime(timetuple),
                 mem])

        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()

        result = []
        for key in arc_keys:
            result.append(archives[key])
        self._archives = '\n'.join(result)
        self._items = items
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        baseurl = config.get("base_url", "")

        archives = {}
        archive_list = tools.walk(self._request, root)
        items = []

        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)

        flavour = data.get("flavour", config.get("default_flavour", "html"))

        template = config.get(
            'archive_template',
            '<a href="%(base_url)s/%(Y)s/index.%(f)s">%(Y)s</a><br />')

        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)

            timedict = {}
            for x in ["m", "Y", "y", "d"]:
                timedict[x] = time.strftime("%" + x, timetuple)

            fulldict.update(timedict)
            fulldict["f"] = flavour
            year = fulldict["Y"]

            if not archives.has_key(year):
                archives[year] = template % fulldict
            items.append([
                "%(Y)s-%(m)s" % fulldict,
                "%(Y)s-%(m)s-%(d)s" % fulldict,
                time.mktime(timetuple), mem
            ])

        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()

        result = []
        for key in arc_keys:
            result.append(archives[key])
        self._archives = '\n'.join(result)
        self._items = items
Example #13
def buildtags(command, argv):
    """Builds the tags index.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)
    
    from Pyblosxom import pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback("entryparser",
                                            {"txt": pyblosxom.blosxom_entry_parser},
                                            mappingfunc=lambda x, y:y,
                                            defaultfunc=lambda x: x)

    req = pyblosxom.Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
def buildtags(command, argv):
    """Builds the tags index.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom import pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback(
        "entryparser", {"txt": pyblosxom.blosxom_entry_parser},
        mappingfunc=lambda x, y: y,
        defaultfunc=lambda x: x)

    req = pyblosxom.Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
Example #15
def buildtags(command, argv):
    """Command for building the tags index."""
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom.pyblosxom import Pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    # build a Pyblosxom object, initialize it, and run the start
    # callback.  this gives entry parsing related plugins a chance to
    # get their stuff together so that they work correctly.
    p = Pyblosxom(config.py, {})
    p.initialize()
    req = p.get_request()
    tools.run_callback("start", {"request": req})

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
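
A hedged sketch of the tags_to_files index that buildtags saves; the tag names and file paths here are made up:

# Sketch of the tags_to_files mapping built above, with invented tags and paths.
tags_to_files = {}
sep = ","
entries = [("/blog/entries/a.txt", "python, pyblosxom"),
           ("/blog/entries/b.txt", "python")]
for filename, tagsline in entries:
    for t in [t.strip() for t in tagsline.split(sep)]:
        tags_to_files.setdefault(t, []).append(filename)
print(tags_to_files)
# {'python': ['/blog/entries/a.txt', '/blog/entries/b.txt'],
#  'pyblosxom': ['/blog/entries/a.txt']}
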
Example #16
def buildtags(command, argv):
    """Command for building the tags index."""
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom.pyblosxom import blosxom_entry_parser, Pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    # build a Pyblosxom object, initialize it, and run the start
    # callback.  this gives entry parsing related plugins a chance to
    # get their stuff together so that they work correctly.
    p = Pyblosxom(config.py, {})
    p.initialize()
    req = p.get_request()
    tools.run_callback("start", {"request": req})

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
Example #17
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        archives = {}
        archive_list = tools.walk(self._request, root)
        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)
        template = config.get("archive_template", '<a href="%(base_url)s/%(Y)s/%(m)s">%(y)s.%(m)s</a>')
        # <a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a>
        # print fulldict["base_url"]
        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)
            timedict = {}
            for x in ["B", "b", "m", "Y", "y"]:
                timedict[x] = time.strftime("%" + x, timetuple)
            fulldict.update(timedict)
            if not archives.has_key(timedict["Y"] + timedict["m"]):
                archives[timedict["Y"] + timedict["m"]] = [template % fulldict, 1]
            else:
                archives[timedict["Y"] + timedict["m"]][1] += 1
                archives[timedict["Y"] + timedict["m"]][0] = template % fulldict
        # print archives
        # return
        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()
        yearmonth = {}
        result = []
        # walk the archives and count entries for each year's months
        for key in arc_keys:
            yearname = key[:-2]
            if yearname in yearmonth.keys():
                yearmonth[yearname][0] += archives[key][1]
                yearmonth[yearname][1].append(archives[key])
            else:
                yearmonth[yearname] = [archives[key][1], []]
                yearmonth[yearname][1].append(archives[key])
        # print yearmonth["2007"]
        mon_keys = yearmonth.keys()
        mon_keys.sort()
        mon_keys.reverse()
        # print mon_keys
        for year in mon_keys:
            # print "%s<sup>%s<sup>"%(year,yearmonth[year][0])
            monode = yearmonth[year][1]
            result.append(
                "<li class='yearchives'><a href='%s'>%s</a><sup>(%d)</sup></li>"
                % (fulldict["base_url"], year, yearmonth[year][0])
            )
            if 1 == len(monode):
                # print "%s<sup>%s<sup>"%(monode[0][0],monode[0][1])
                result.append("<li>%s<sup>(%d)</sup></li>" % (monode[0][0], monode[0][1]))
            else:
                for m in monode:
                    # print m
                    # print "%s<sup>%s<sup>"%(m[0],m[1])
                    result.append("<li>%s<sup>(%d)</sup></li>" % (m[0], m[1]))
                    # result.append("%s<sup>%s<sup>"%(month[0],month[1]))

        # print result
        self._archives = "\n".join(result)
Example #18
def cb_filelist(args):
    request = args['request']
    http = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()
    _baseurl = config.get("base_url", "")

    trigger = config.get('cindex_trigger', 'site-index')

    if http['PATH_INFO'] != trigger:
        return

    # get the entries
    datadir = config['datadir']
    #print dir(tools)
    #print help(tools.walk)
    '''for reference, the signature of tools.walk:
    walk(request, root='.', recurse=0, pattern='', return_folders=0)
    :param request: the Request object
    :param root: the root directory to walk
    :param recurse: the depth of recursion; defaults to 0 which goes all
                    the way down
    :param pattern: the regexp object for matching files; defaults to
                    '' which causes PyBlosxom to return files with
                    file extensions that match those the entryparsers
                    handle
    :param return_folders: True if you want only folders, False if you
                    want files AND folders
    
    :returns: a list of file paths.
    '''
    files = tools.walk(request, root=datadir, recurse=3)
    files.sort()
    #print files
    #print len(files)
    #return
    body = '<div id="categoriselist">'
    #print files
    # sort into sections, one for each letter. the dictionary is
    # letter => (entry name, path) where path is relative to datadir.
    #sections = {}
    #   the entrise dictionary is
    # path => (entry name, 0)
    entrise = []
    entry_extensions = data['extensions'].keys()

    for file in files:
        #objEntry = entries.fileentry.FileEntry(request, file,datadir)
        #print objEntry.keys()
        assert file.startswith(datadir)
        path, ext = os.path.splitext(file[len(datadir):])
        if ext[1:] in entry_extensions:  # strip the leading period from ext
            entry_name = os.path.basename(path)
            #sections.setdefault(entry_name[0].upper(), []).append((entry_name, path))
            entrise.append((entry_name, path, file))
    #print entrise
    #sortPaths = sorted(entrise.iteritems(), key=itemgetter(1), reverse=True)
    #print sortPaths
    #print _baseurl
    etree = {}
    """{
    "pathID":[(path.split()),"title",...]
    ,
    }
    """
    for entry in entrise:
        e = Pyblosxom.entries.fileentry.FileEntry(request, entry[2], entry[1])
        deeps = entry[1].split("/")[:-1]
        pathID = "".join(deeps)
        if pathID in etree:
            etree[pathID].append((e['title'], entry[1]))
        else:
            etree[pathID] = [tuple(deeps), (e['title'], entry[1])]
    #print etree.keys()
    root_path_list = config.get("category_root_list", DEFAULT_ROOT)
    root_entry_list = []
    for opath in root_path_list:
        #print opath
        crtRoot = []
        for k in etree.keys():
            if opath in k:
                crtRoot.append(k)
        crtRoot.sort()
        root_entry_list.append((opath, crtRoot))
    '''root_entry_list as::
    [('Zen', ['ZenChinese', 'ZenGoogle', 'Zenpythonic']), ('oss', ['oss', 'ossFreeBSD', 'ossMozillaFireFox', 'ossUbuntu']), ('opening', []), ('mind', ['mind']), ('Quiet', ['Quietliving', 'Quietnomeans']), ('utility', ['utilitySubversion', 'utilitySubversionhooks', 'utilitypy4strStructuredText', 'utilitypy4webDjango', 'utilitypy4webMoinMoin', 'utilitypy4webQuixote', 'utilitypy4zh', 'utilityzqlib']), ('internet', ['internet', 'internetFolksonomy']), ('easy', ['easymovie', 'easymusic']), ('techic', ['techic', 'techicEmacs', 'techicPyBlosxom', 'techicPyBlosxomblosxom', 'techicPyBlosxomplugins'])]
    '''
    body += '<h3>/</h3>'
    #print etree
    for e in etree[''][1:]:
        #print "etree[''] include::",e
        body += '<span class="indents">%s</span><a href="%s%s.html">%s</a><br>\n' % (
            "....", _baseurl, e[1], e[0])
    #print root_entry_list
    for k in root_entry_list:
        #['techic', 'techicEmacs', 'techicPyBlosxom', 'techicPyBlosxomblosxom', 'techicPyBlosxomplugins']
        body += '<h4>%s/</h4>' % k[0]
        cpath = ""
        for p in k[1]:
            #print etree[p]
            #[('', 'Zen', 'Chinese'), '9.18', 'CC Salon BJ', '\xe2\x80\x9c\xe5\x9b\xbd\xe9\x99\x85\xe8\x87\xaa\xe7\x94\xb1\xe8\xbd\xaf\xe4\xbb\xb6\xe6\x97\xa5\xe2\x80\x9d\xe4\xb9\x8b\xe5\xa4\xb4\xe8\x84\x91\xe9\xa3\x8e\xe6\x9a\xb4', '\xe8\xa1\xa8\xe5\xbd\xa2\xe7\xa0\x81\xe7\x9a\x84\xe6\xb6\x88\xe4\xba\xa1\xe8\x83\x8c\xe6\x99\xaf\xef\xbc\x81']
            epath = "/".join(etree[p][0][2:])
            if k[0] != "".join(etree[p][0]):
                if cpath != epath:
                    cpath = epath
                    ldeep = len(etree[p][0][1:])
                    if 3 > ldeep:
                        body += '<H5>%s/</H5>' % "/".join(etree[p][0][2:])
                    else:
                        body += '<H6>%s/</H6>' % "/".join(etree[p][0][3:])
            for e in etree[p][1:]:
                body += '<span id="%s" class="indents">%s</span><a href="%s%s.html">%s</a><br>\n' % (
                    "/".join(etree[p][0]), "..." * len(etree[p][0]), _baseurl,
                    e[1], e[0])
    '''
    [('', 'easy', 'movie'), '\xe4\xb8\x96\xe9\x97\xb4\xe5\xae\x89\xe5\xbe\x97\xe5\x8f\x8c\xe5\x85\xa8\xe6\xb3\x95,\xe4\xb8\x8d\xe8\xb4\x9f\xe5\xa6\x82\xe6\x9d\xa5\xe4\xb8\x8d\xe8\xb4\x9f\xe5\x8d\xbf!']
    [('', 'easy', 'music'), 'ZARD\xe6\xb6\x88\xe9\x80\x9d\xe4\xba\x86']

    for entry in entrise:
        e = Pyblosxom.entries.fileentry.FileEntry(request, entry[2], entry[1])
        #print e['title']
        #print entry[1].split("/")[:-1]
        body += '<span class="indents">%s</span><a href="%s%s.html">%s</a>%s<br>\n'%(
                "...."*len(entry[1].split("/"))
                ,_baseurl
                ,entry[1]
                ,e['title'] #entry[0]
                ,entry[1]
                )
    '''
    #print body
    body += "</div>"
    data = {'title': config.get('cindex_title', 'index')}
    # use the epoch for mtime. otherwise, pyblosxom uses the current time, which
    # makes other plugins (like weblogsping) think this is a new entry.
    epoch = time.localtime(0)
    fe = Pyblosxom.entries.base.generate_entry(request, data, body, epoch)
    return [fe]
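
A hedged sketch of the etree mapping described in the docstring above: the key is the joined directory pieces of an entry path, and the value starts with those pieces as a tuple followed by (title, path) pairs. All values here are invented:

# Sketch of the etree structure built above; titles and paths are hypothetical.
etree = {}
entries = [("first-post", "/Zen/Chinese/first-post", "My first post"),
           ("second-post", "/Zen/Chinese/second-post", "Another post")]
for entry_name, path, title in entries:
    deeps = path.split("/")[:-1]        # ['', 'Zen', 'Chinese']
    path_id = "".join(deeps)            # 'ZenChinese'
    if path_id in etree:
        etree[path_id].append((title, path))
    else:
        etree[path_id] = [tuple(deeps), (title, path)]
print(etree["ZenChinese"])
# [('', 'Zen', 'Chinese'), ('My first post', '/Zen/Chinese/first-post'),
#  ('Another post', '/Zen/Chinese/second-post')]
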
Example #19
    def run_static_renderer(self, incremental=False):
        """This will go through all possible things in the blog and
        statically render everything to the ``static_dir`` specified
        in the config file.

        This figures out all the possible ``path_info`` settings and
        calls ``self.run()`` a bazillion times saving each file.

        :param incremental: Whether (True) or not (False) to
                            incrementally render the pages.  If we're
                            incrementally rendering pages, then we
                            render only the ones that have changed.
        """
        self.initialize()

        config = self._request.get_configuration()
        data = self._request.get_data()
        print "Performing static rendering."
        if incremental:
            print "Incremental is set."

        staticdir = config.get("static_dir", "")
        datadir = config["datadir"]

        if not staticdir:
            print "Error: You must set static_dir in your config file."
            return 0

        flavours = config.get("static_flavours", ["html"])

        renderme = []

        monthnames = config.get("static_monthnames", True)
        monthnumbers = config.get("static_monthnumbers", False)
        yearindexes = config.get("static_yearindexes", True)

        dates = {}
        categories = {}

        # first we handle entries and categories
        listing = tools.walk(self._request, datadir)

        for mem in listing:
            # skip the ones that have bad extensions
            ext = mem[mem.rfind(".")+1:]
            if not ext in data["extensions"].keys():
                continue

            # grab the mtime of the entry file
            mtime = time.mktime(tools.filestat(self._request, mem))

            # remove the datadir from the front and the bit at the end
            mem = mem[len(datadir):mem.rfind(".")]

            # this is the static filename
            fn = os.path.normpath(staticdir + mem)

            # grab the mtime of one of the statically rendered file
            try:
                smtime = os.stat(fn + "." + flavours[0])[8]
            except:
                smtime = 0

            # if the entry is more recent than the static, we want to
            # re-render
            if smtime < mtime or not incremental:

                # grab the categories
                temp = os.path.dirname(mem).split(os.sep)
                for i in range(len(temp)+1):
                    p = os.sep.join(temp[0:i])
                    categories[p] = 0

                # grab the date
                mtime = time.localtime(mtime)
                year = time.strftime("%Y", mtime)
                month = time.strftime("%m", mtime)
                day = time.strftime("%d", mtime)

                if yearindexes:
                    dates[year] = 1

                if monthnumbers:
                    dates[year + "/" + month] = 1
                    dates[year + "/" + month + "/" + day] = 1

                if monthnames:
                    monthname = tools.num2month[month]
                    dates[year + "/" + monthname] = 1
                    dates[year + "/" + monthname + "/" + day] = 1

                # toss in the render queue
                for f in flavours:
                    renderme.append( (mem + "." + f, "") )

        print "rendering %d entries." % len(renderme)

        # handle categories
        categories = categories.keys()
        categories.sort()

        # if they have stuff in their root category, it'll add a "/"
        # to the category list and we want to remove that because it's
        # a duplicate of "".
        if "/" in categories:
            categories.remove("/")

        print "rendering %d category indexes." % len(categories)

        for mem in categories:
            mem = os.path.normpath(mem + "/index.")
            for f in flavours:
                renderme.append((mem + f, ""))

        # now we handle dates
        dates = dates.keys()
        dates.sort()

        dates = ["/" + d for d in dates]

        print "rendering %d date indexes." % len(dates)

        for mem in dates:
            mem = os.path.normpath(mem + "/index.")
            for f in flavours:
                renderme.append((mem + f, ""))

        # now we handle arbitrary urls
        additional_stuff = config.get("static_urls", [])
        print "rendering %d arbitrary urls." % len(additional_stuff)

        for mem in additional_stuff:
            if mem.find("?") != -1:
                url = mem[:mem.find("?")]
                query = mem[mem.find("?")+1:]
            else:
                url = mem
                query = ""

            renderme.append((url, query))

        # now we pass the complete render list to all the plugins via
        # cb_staticrender_filelist and they can add to the filelist
        # any (url, query) tuples they want rendered.
        print "(before) building %s files." % len(renderme)
        tools.run_callback("staticrender_filelist",
                           {'request': self._request,
                            'filelist': renderme,
                            'flavours': flavours,
                            'incremental': incremental})

        renderme = sorted(set(renderme))

        print "building %s files." % len(renderme)

        for url, q in renderme:
            url = url.replace(os.sep, "/")
            print "rendering '%s' ..." % url

            tools.render_url_statically(config, url, q)

        # we're done, clean up
        self.cleanup()
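
A hedged sketch of the (url, query) render queue that run_static_renderer assembles before handing it to the plugins; every path, flavour, and static_urls entry here is hypothetical:

# Sketch of the render queue built above; all paths and queries are made up.
flavours = ["html"]
renderme = []

renderme.append(("/dev/pyblosxom/first-post.html", ""))   # an entry
renderme.append(("/dev/pyblosxom/index.html", ""))        # a category index
renderme.append(("/2011/Jul/index.html", ""))             # a date index
renderme.append(("/booklist/index.html", "flav=rss"))     # from static_urls, split on "?"

for url, query in sorted(set(renderme)):
    print("render %s %s" % (url, query))
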
Example #20
    def gen_categories(self):
        config = self._request.get_configuration()
        root = config["datadir"]

        start_t = config.get("category_start", DEFAULT_START)
        begin_t = config.get("category_begin", DEFAULT_BEGIN)
        item_t = config.get("category_item", DEFAULT_ITEM)
        end_t = config.get("category_end", DEFAULT_END)
        finish_t = config.get("category_finish", DEFAULT_FINISH)

        self._baseurl = config.get("base_url", "")

        form = self._request.get_form()
        flavour = (form['flav'].value
                   if 'flav' in form
                   else config.get('default_flavour', 'html'))

        # build the list of all entries in the datadir
        elist = tools.walk(self._request, root)

        # peel off the root dir from the list of entries
        elist = [mem[len(root) + 1:] for mem in elist]

        # go through the list of entries and build a map that
        # maintains a count of how many entries are in each category
        elistmap = {}
        for mem in elist:
            mem = os.path.dirname(mem)
            elistmap[mem] = 1 + elistmap.get(mem, 0)
        self._elistmap = elistmap

        # go through the elistmap keys (which is the list of
        # categories) and for each piece in the key (i.e. the key
        # could be "dev/pyblosxom/releases" and the pieces would be
        # "dev", "pyblosxom", and "releases") we build keys for the
        # category list map (i.e. "dev", "dev/pyblosxom",
        # "dev/pyblosxom/releases")
        clistmap = {}
        for mem in elistmap.keys():
            mem = mem.split(os.sep)
            for index in range(len(mem) + 1):
                p = os.sep.join(mem[0:index])
                clistmap[p] = 0

        # then we take the category list from the clistmap and sort it
        # alphabetically
        clist = clistmap.keys()
        clist.sort()

        output = []
        indent = 0

        output.append(start_t)
        # then we generate each item in the list
        for item in clist:
            itemlist = item.split(os.sep)

            num = 0
            for key in self._elistmap.keys():
                if item == '' or key == item or key.startswith(item + os.sep):
                    num = num + self._elistmap[key]

            if not item:
                tab = ""
            else:
                tab = len(itemlist) * "&nbsp;&nbsp;"

            if itemlist != ['']:
                if indent > len(itemlist):
                    for i in range(indent - len(itemlist)):
                        output.append(end_t)

                elif indent < len(itemlist):
                    for i in range(len(itemlist) - indent):
                        output.append(begin_t)

            # now we build the dict with the values for substitution
            d = {"base_url": self._baseurl,
                 "fullcategory": item + "/",
                 "category": itemlist[-1] + "/",
                 "flavour": flavour,
                 "count": num,
                 "indent": tab}

            # this prevents a double / in the root category url
            if item == "":
                d["fullcategory"] = item

            # this adds urlencoded versions
            d["fullcategory_urlencoded"] = (
                tools.urlencode_text(d["fullcategory"]))
            d["category_urlencoded"] = tools.urlencode_text(d["category"])

            # and we toss it in the thing
            output.append(item_t % d)

            if itemlist != ['']:
                indent = len(itemlist)

        output.append(end_t * indent)
        output.append(finish_t)

        # then we join the list and that's the final string
        self._categories = "\n".join(output)
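
A small sketch of the category-prefix expansion described in the comments above; the category name and count are made up, and the printed result assumes a Unix-style os.sep:

# Sketch of the clistmap expansion above: each category contributes every
# prefix of itself.  The category and count are hypothetical.
import os

elistmap = {"dev/pyblosxom/releases": 3}     # category -> entry count

clistmap = {}
for mem in elistmap.keys():
    pieces = mem.split(os.sep)
    for index in range(len(pieces) + 1):
        clistmap[os.sep.join(pieces[0:index])] = 0

print(sorted(clistmap.keys()))
# ['', 'dev', 'dev/pyblosxom', 'dev/pyblosxom/releases']
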
Example #21
    def generate_calendar(self):
        """
        Generates the calendar.  We'd like to walk the archives
        for things that happen in this month and mark the dates
        accordingly.  After doing that we pass it to a formatting
        method which turns the thing into HTML.
        """
        config = self._request.get_configuration()
        data = self._request.get_data()
        entry_list = data["entry_list"]

        root = config["datadir"]
        baseurl = config.get("base_url", "")

        self._today = time.localtime()

        if len(entry_list) == 0:
            # if there are no entries, we shouldn't even try to
            # do something fancy.
            self._cal = ""
            return

        view = list(entry_list[0]["timetuple"])

        # this comes in as '', 2001, 2002, 2003, ...  so we can convert it
        # without an issue
        temp = data.get("pi_yr")
        if not temp:
            view[0] = int(self._today[0])
        else:
            view[0] = int(temp)

        # the month is a bit harder since it can come in as "08", "", or
        # "Aug" (in the example of August).
        temp = data.get("pi_mo")
        if temp and temp.isdigit():
            view[1] = int(temp)
        elif temp and tools.month2num.has_key(temp):
            view[1] = int(tools.month2num[temp])
        else:
            view[1] = int(self._today[1])

        self._view = view = tuple(view)

        # if we're looking at a specific day, we figure out what it is
        if data.get("pi_yr") and data.get("pi_mo") and data.get("pi_da"):
            if data["pi_mo"].isdigit():
                mon = data["pi_mo"]
            else:
                mon = tools.month2num[data["pi_mo"]]

            self._specificday = (int(data.get("pi_yr",
                                              self._today[0])), int(mon),
                                 int(data.get("pi_da", self._today[2])))

        archive_list = tools.walk(self._request, root)

        yearmonth = {}

        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)

            # if we already have an entry for this date, we skip to the
            # next one because we've already done this processing
            day = str(timetuple[2]).rjust(2)
            if self._entries.has_key(day):
                continue

            # add an entry for yyyymm so we can figure out next/previous
            year = str(timetuple[0])
            dayzfill = string.zfill(timetuple[1], 2)
            yearmonth[year + dayzfill] = time.strftime("%b", timetuple)

            # if the entry isn't in the year/month we're looking at with
            # the calendar, then we skip to the next one
            if timetuple[0:2] != view[0:2]:
                continue

            # mark the entry because it's one we want to show
            datepiece = time.strftime("%Y/%b/%d", timetuple)
            self._entries[day] = (baseurl + "/" + datepiece, day)

        # Set the first day of the week (Sunday by default)
        first = config.get('calendar_firstweekday', 6)
        calendar.setfirstweekday(first)

        # create the calendar
        cal = calendar.monthcalendar(view[0], view[1])

        # insert the days of the week
        cal.insert(0, calendar.weekheader(2).split())

        # figure out next and previous links by taking the dict of
        # yyyymm strings we created, turning it into a list, sorting
        # them, and then finding "today"'s entry.  then the one before
        # it (index-1) is prev, and the one after (index+1) is next.
        keys = yearmonth.keys()
        keys.sort()
        thismonth = time.strftime("%Y%m", view)

        # do some quick adjustment to make sure we didn't pick a
        # yearmonth that's outside the yearmonths of the entries we
        # know about.
        if thismonth in keys:
            index = keys.index(thismonth)
        elif len(keys) == 0 or keys[0] > thismonth:
            index = 0
        else:
            index = len(keys) - 1

        # build the prev link
        if index == 0 or len(keys) == 0:
            prev = None
        else:
            prev = ("%s/%s/%s" %
                    (baseurl, keys[index - 1][:4], yearmonth[keys[index - 1]]),
                    "&lt;")

        # build the next link
        if index == len(yearmonth) - 1 or len(keys) == 0:
            next = None
        else:
            next = ("%s/%s/%s" %
                    (baseurl, keys[index + 1][:4], yearmonth[keys[index + 1]]),
                    "&gt;")

        # insert the month name and next/previous links
        cal.insert(0, [prev, time.strftime("%B %Y", view), next])

        self._cal = self.format_with_css(cal)
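
A hedged sketch of the prev/next month lookup above: sort the yyyymm keys, locate the month being viewed, and take its neighbours. The months here are invented:

# Sketch of the prev/next month lookup above; the yearmonth entries are made up.
yearmonth = {"201105": "May", "201106": "Jun", "201107": "Jul"}
keys = sorted(yearmonth.keys())
thismonth = "201106"

index = keys.index(thismonth) if thismonth in keys else len(keys) - 1
prev = None if index == 0 else keys[index - 1]
nxt = None if index == len(keys) - 1 else keys[index + 1]
print("%s %s" % (prev, nxt))   # 201105 201107
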
def cb_filelist(args):
    request = args['request']
    http = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()
    _baseurl = config.get("base_url", "")

    trigger = config.get('cindex_trigger', 'site-index')

    if http['PATH_INFO'] != trigger:
        return

    # get the entries
    datadir = config['datadir']
    #print dir(tools)
    #print help(tools.walk)
    '''for reference, the signature of tools.walk:
    walk(request, root='.', recurse=0, pattern='', return_folders=0)
    :param request: the Request object
    :param root: the root directory to walk
    :param recurse: the depth of recursion; defaults to 0 which goes all
                    the way down
    :param pattern: the regexp object for matching files; defaults to
                    '' which causes PyBlosxom to return files with
                    file extensions that match those the entryparsers
                    handle
    :param return_folders: True if you want only folders, False if you
                    want files AND folders
    
    :returns: a list of file paths.
    '''
    files = tools.walk(request
        ,root=datadir
        ,recurse=3)
    files.sort()
    #print files
    #print len(files)
    #return
    body = '<div id="categoriselist">'
    #print files
    # sort into sections, one for each letter. the dictionary is 
    # letter => (entry name, path) where path is relative to datadir.
    #sections = {}
    #   the entrise dictionary is 
    # path => (entry name, 0) 
    entrise = []
    entry_extensions = data['extensions'].keys()

    for file in files:
        #objEntry = entries.fileentry.FileEntry(request, file,datadir)
        #print objEntry.keys()  
        assert file.startswith(datadir)
        path, ext = os.path.splitext(file[len(datadir):])
        if ext[1:] in entry_extensions:  # strip the leading period from ext
            entry_name = os.path.basename(path)
            #sections.setdefault(entry_name[0].upper(), []).append((entry_name, path))
            entrise.append((entry_name,path,file))
    #print entrise
    #sortPaths = sorted(entrise.iteritems(), key=itemgetter(1), reverse=True)
    #print sortPaths
    #print _baseurl
    etree = {}
    """{
    "pathID":[(path.split()),"title",...]
    ,
    }
    """
    for entry in entrise:
        e = Pyblosxom.entries.fileentry.FileEntry(request, entry[2], entry[1])
        deeps = entry[1].split("/")[:-1]
        pathID = "".join(deeps)
        if pathID in etree:
            etree[pathID].append((e['title'],entry[1]))
        else:
            etree[pathID]= [tuple(deeps),(e['title'],entry[1])]
    #print etree.keys()
    root_path_list = config.get("category_root_list", DEFAULT_ROOT)
    root_entry_list = []
    for opath in root_path_list:
        #print opath
        crtRoot = []
        for k in etree.keys():
            if opath in k:
                crtRoot.append(k)
        crtRoot.sort()
        root_entry_list.append((opath,crtRoot))

    '''root_entry_list as::
    [('Zen', ['ZenChinese', 'ZenGoogle', 'Zenpythonic']), ('oss', ['oss', 'ossFreeBSD', 'ossMozillaFireFox', 'ossUbuntu']), ('opening', []), ('mind', ['mind']), ('Quiet', ['Quietliving', 'Quietnomeans']), ('utility', ['utilitySubversion', 'utilitySubversionhooks', 'utilitypy4strStructuredText', 'utilitypy4webDjango', 'utilitypy4webMoinMoin', 'utilitypy4webQuixote', 'utilitypy4zh', 'utilityzqlib']), ('internet', ['internet', 'internetFolksonomy']), ('easy', ['easymovie', 'easymusic']), ('techic', ['techic', 'techicEmacs', 'techicPyBlosxom', 'techicPyBlosxomblosxom', 'techicPyBlosxomplugins'])]
    '''
    body += '<h3>/</h3>'
    #print etree
    for e in etree[''][1:]:
        #print "etree[''] include::",e
        body += '<span class="indents">%s</span><a href="%s%s.html">%s</a><br>\n'%(
            "...."
            ,_baseurl
            ,e[1]
            ,e[0]
            )
    #print root_entry_list
    for k in root_entry_list:
        #['techic', 'techicEmacs', 'techicPyBlosxom', 'techicPyBlosxomblosxom', 'techicPyBlosxomplugins']
        body += '<h4>%s/</h4>'%k[0]
        cpath = ""
        for p in k[1]:
            #print etree[p]
            #[('', 'Zen', 'Chinese'), '9.18', 'CC Salon BJ', '\xe2\x80\x9c\xe5\x9b\xbd\xe9\x99\x85\xe8\x87\xaa\xe7\x94\xb1\xe8\xbd\xaf\xe4\xbb\xb6\xe6\x97\xa5\xe2\x80\x9d\xe4\xb9\x8b\xe5\xa4\xb4\xe8\x84\x91\xe9\xa3\x8e\xe6\x9a\xb4', '\xe8\xa1\xa8\xe5\xbd\xa2\xe7\xa0\x81\xe7\x9a\x84\xe6\xb6\x88\xe4\xba\xa1\xe8\x83\x8c\xe6\x99\xaf\xef\xbc\x81']
            epath = "/".join(etree[p][0][2:])
            if k[0] != "".join(etree[p][0]):
                if cpath != epath:
                    cpath = epath
                    ldeep = len(etree[p][0][1:])
                    if 3 > ldeep:
                        body += '<H5>%s/</H5>'%"/".join(etree[p][0][2:])
                    else:
                        body += '<H6>%s/</H6>'%"/".join(etree[p][0][3:])
            for e in etree[p][1:]:
                body += '<span id="%s" class="indents">%s</span><a href="%s%s.html">%s</a><br>\n'%(
                        "/".join(etree[p][0])
                        ,"..."*len(etree[p][0])
                        ,_baseurl
                        ,e[1]
                        ,e[0]
                        )

    '''
    [('', 'easy', 'movie'), '\xe4\xb8\x96\xe9\x97\xb4\xe5\xae\x89\xe5\xbe\x97\xe5\x8f\x8c\xe5\x85\xa8\xe6\xb3\x95,\xe4\xb8\x8d\xe8\xb4\x9f\xe5\xa6\x82\xe6\x9d\xa5\xe4\xb8\x8d\xe8\xb4\x9f\xe5\x8d\xbf!']
    [('', 'easy', 'music'), 'ZARD\xe6\xb6\x88\xe9\x80\x9d\xe4\xba\x86']

    for entry in entrise:
        e = Pyblosxom.entries.fileentry.FileEntry(request, entry[2], entry[1])
        #print e['title']
        #print entry[1].split("/")[:-1]
        body += '<span class="indents">%s</span><a href="%s%s.html">%s</a>%s<br>\n'%(
                "...."*len(entry[1].split("/"))
                ,_baseurl
                ,entry[1]
                ,e['title'] #entry[0]
                ,entry[1]
                )
    '''
    #print body
    body += "</div>"
    data = {'title': config.get('cindex_title', 'index')}
    # use the epoch for mtime. otherwise, pyblosxom uses the current time, which
    # makes other plugins (like weblogsping) think this is a new entry.
    epoch = time.localtime(0)
    fe = Pyblosxom.entries.base.generate_entry(request, data, body, epoch)
    return [fe]
Example #23
    def gen_categories(self):
        config = self._request.get_configuration()
        root = config["datadir"]

        start_t = config.get("category_start", DEFAULT_START)
        begin_t = config.get("category_begin", DEFAULT_BEGIN)
        item_t = config.get("category_item", DEFAULT_ITEM)
        end_t = config.get("category_end", DEFAULT_END)
        finish_t = config.get("category_finish", DEFAULT_FINISH)

        self._baseurl = config.get("base_url", "")

        form = self._request.get_form()
        flavour = (form['flav'].value
                   if 'flav' in form
                   else config.get('default_flavour', 'html'))

        # build the list of all entries in the datadir
        elist = tools.walk(self._request, root)

        # peel off the root dir from the list of entries
        elist = [mem[len(root) + 1:] for mem in elist]

        # go through the list of entries and build a map that
        # maintains a count of how many entries are in each category
        elistmap = {}
        for mem in elist:
            mem = os.path.dirname(mem)
            elistmap[mem] = 1 + elistmap.get(mem, 0)
        self._elistmap = elistmap

        # go through the elistmap keys (which is the list of
        # categories) and for each piece in the key (i.e. the key
        # could be "dev/pyblosxom/releases" and the pieces would be
        # "dev", "pyblosxom", and "releases") we build keys for the
        # category list map (i.e. "dev", "dev/pyblosxom",
        # "dev/pyblosxom/releases")
        clistmap = {}
        for mem in elistmap.keys():
            mem = mem.split(os.sep)
            for index in range(len(mem) + 1):
                p = os.sep.join(mem[0:index])
                clistmap[p] = 0

        # then we take the category list from the clistmap and sort it
        # alphabetically
        clist = clistmap.keys()
        clist.sort()

        output = []
        indent = 0

        output.append(start_t)
        # then we generate each item in the list
        for item in clist:
            itemlist = item.split(os.sep)

            num = 0
            for key in self._elistmap.keys():
                if item == '' or key == item or key.startswith(item + os.sep):
                    num = num + self._elistmap[key]

            if not item:
                tab = ""
            else:
                tab = len(itemlist) * "&nbsp;&nbsp;"

            if itemlist != ['']:
                if indent > len(itemlist):
                    for i in range(indent - len(itemlist)):
                        output.append(end_t)

                elif indent < len(itemlist):
                    for i in range(len(itemlist) - indent):
                        output.append(begin_t)

            # now we build the dict with the values for substitution
            d = {"base_url": self._baseurl,
                 "fullcategory": item + "/",
                 "category": itemlist[-1] + "/",
                 "flavour": flavour,
                 "count": num,
                 "indent": tab}

            # this prevents a double / in the root category url
            if item == "":
                d["fullcategory"] = item

            # this adds urlencoded versions
            d["fullcategory_urlencoded"] = (
                tools.urlencode_text(d["fullcategory"]))
            d["category_urlencoded"] = tools.urlencode_text(d["category"])

            # and we toss it in the thing
            output.append(item_t % d)

            if itemlist != ['']:
                indent = len(itemlist)

        output.append(end_t * indent)
        output.append(finish_t)

        # then we join the list and that's the final string
        self._categories = "\n".join(output)
Example #24
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        archives = {}
        archive_list = tools.walk(self._request, root)
        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)
        template = config.get(
            'archive_template',
            '<a href="%(base_url)s/%(Y)s/%(m)s">%(y)s.%(m)s</a>')
        #<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a>
        #print fulldict["base_url"]
        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)
            timedict = {}
            for x in ["B", "b", "m", "Y", "y"]:
                timedict[x] = time.strftime("%" + x, timetuple)
            fulldict.update(timedict)
            if not archives.has_key(timedict['Y'] + timedict['m']):
                archives[timedict['Y'] +
                         timedict['m']] = [template % fulldict, 1]
            else:
                archives[timedict['Y'] + timedict['m']][1] += 1
                archives[timedict['Y'] +
                         timedict['m']][0] = template % fulldict
        #print archives
        #return
        arc_keys = archives.keys()
        arc_keys.sort()
        arc_keys.reverse()
        yearmonth = {}
        result = []
        # walk the archives and count entries for each year's months
        for key in arc_keys:
            yearname = key[:-2]
            if yearname in yearmonth.keys():
                yearmonth[yearname][0] += archives[key][1]
                yearmonth[yearname][1].append(archives[key])
            else:
                yearmonth[yearname] = [archives[key][1], []]
                yearmonth[yearname][1].append(archives[key])
        #print yearmonth["2007"]
        mon_keys = yearmonth.keys()
        mon_keys.sort()
        mon_keys.reverse()
        #print mon_keys
        for year in mon_keys:
            monode = yearmonth[year][1]
            result.append(
                "<li class='yearchives'><a href='%s/%s'>%s</a><sup>(%d)</sup></li>"
                % (fulldict["base_url"], year, year, yearmonth[year][0]))
            if 1 == len(monode):
                result.append("<li>%s<sup>(%d)</sup></li>" %
                              (monode[0][0], monode[0][1]))
            else:
                for m in monode:
                    result.append("<li>%s<sup>(%d)</sup></li>" % (m[0], m[1]))

        self._archives = '\n'.join(result)
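# Illustrative sketch (assumption, not from the plugin above): how the
# archive_template is filled in for a single entry's timestamp.  The
# base_url value is made up for demonstration.
import time

fulldict = {"base_url": "http://example.com/blog"}
timetuple = time.strptime("2008-01-15", "%Y-%m-%d")
for x in ["B", "b", "m", "Y", "y"]:
    fulldict[x] = time.strftime("%" + x, timetuple)
template = '<a href="%(base_url)s/%(Y)s/%(m)s">%(y)s.%(m)s</a>'
print template % fulldict
# -> <a href="http://example.com/blog/2008/01">08.01</a>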
Example #25
0
    def run_static_renderer(self, incremental=False):
        """This will go through all possible things in the blog and
        statically render everything to the ``static_dir`` specified
        in the config file.

        This figures out all the possible ``path_info`` settings and
        calls ``self.run()`` a bazillion times saving each file.

        :param incremental: Whether (True) or not (False) to
                            incrementally render the pages.  If we're
                            incrementally rendering pages, then we
                            render only the ones that have changed.
        """
        self.initialize()

        config = self._request.get_configuration()
        data = self._request.get_data()
        print "Performing static rendering."
        if incremental:
            print "Incremental is set."

        static_dir = config.get("static_dir", "")
        data_dir = config["datadir"]

        if not static_dir:
            print "Error: You must set static_dir in your config file."
            return 0

        flavours = config.get("static_flavours", ["html"])
        index_flavours = config.get("static_index_flavours", ["html"])

        render_me = []

        month_names = config.get("static_monthnames", True)
        month_numbers = config.get("static_monthnumbers", False)
        year_indexes = config.get("static_yearindexes", True)

        dates = {}
        categories = {}

        # first we handle entries and categories
        listing = tools.walk(self._request, data_dir)

        for mem in listing:
            # skip the ones that have bad extensions
            ext = mem[mem.rfind(".") + 1:]
            if not ext in data["extensions"].keys():
                continue

            # grab the mtime of the entry file
            mtime = time.mktime(tools.filestat(self._request, mem))

            # remove the datadir from the front and the bit at the end
            mem = mem[len(data_dir):mem.rfind(".")]

            # this is the static filename
            fn = os.path.normpath(static_dir + mem)

            # grab the mtime of one of the statically rendered file
            try:
                smtime = os.stat(fn + "." + flavours[0])[8]
            except (OSError, IOError):
                smtime = 0

            # if the entry is more recent than the static, we want to
            # re-render
            if smtime < mtime or not incremental:

                # grab the categories
                temp = os.path.dirname(mem).split(os.sep)
                for i in range(len(temp) + 1):
                    p = os.sep.join(temp[0:i])
                    categories[p] = 0

                # grab the date
                mtime = time.localtime(mtime)
                year = time.strftime("%Y", mtime)
                month = time.strftime("%m", mtime)
                day = time.strftime("%d", mtime)

                if year_indexes:
                    dates[year] = 1

                if month_numbers:
                    dates[year + "/" + month] = 1
                    dates[year + "/" + month + "/" + day] = 1

                if month_names:
                    monthname = tools.num2month[month]
                    dates[year + "/" + monthname] = 1
                    dates[year + "/" + monthname + "/" + day] = 1

                # toss in the render queue
                for f in flavours:
                    render_me.append((mem + "." + f, ""))

        print "rendering %d entries." % len(render_me)

        # handle categories
        categories = categories.keys()
        categories.sort()

        # if they have stuff in their root category, it'll add a "/"
        # to the category list and we want to remove that because it's
        # a duplicate of "".
        if "/" in categories:
            categories.remove("/")

        print "rendering %d category indexes." % len(categories)

        for mem in categories:
            mem = os.path.normpath(mem + "/index.")
            for f in index_flavours:
                render_me.append((mem + f, ""))

        # now we handle dates
        dates = dates.keys()
        dates.sort()

        dates = ["/" + d for d in dates]

        print "rendering %d date indexes." % len(dates)

        for mem in dates:
            mem = os.path.normpath(mem + "/index.")
            for f in index_flavours:
                render_me.append((mem + f, ""))

        # now we handle arbitrary urls
        additional_stuff = config.get("static_urls", [])
        print "rendering %d arbitrary urls." % len(additional_stuff)

        for mem in additional_stuff:
            if mem.find("?") != -1:
                url = mem[:mem.find("?")]
                query = mem[mem.find("?") + 1:]
            else:
                url = mem
                query = ""

            render_me.append((url, query))

        # now we pass the complete render list to all the plugins via
        # cb_staticrender_filelist and they can add to the filelist
        # any (url, query) tuples they want rendered.
        print "(before) building %s files." % len(render_me)
        tools.run_callback(
            "staticrender_filelist", {
                'request': self._request,
                'filelist': render_me,
                'flavours': flavours,
                'incremental': incremental
            })

        render_me = sorted(set(render_me))

        print "building %s files." % len(render_me)

        for url, q in render_me:
            url = url.replace(os.sep, "/")
            print "rendering '%s' ..." % url

            tools.render_url_statically(dict(config), url, q)

        # we're done, clean up
        self.cleanup()
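# Sketch of a companion plugin hook (hedged: this assumes the
# staticrender_filelist callback invoked by run_callback above).  Each
# callback receives the same args dict and may append extra
# (url, query) tuples to have them rendered as well.
def cb_staticrender_filelist(args):
    # also render an RSS feed for the root index; the "rss" flavour
    # name here is an assumption for demonstration.
    args["filelist"].append(("/index.rss", ""))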
Example #26
0
    def generate_calendar(self):
        """
        Generates the calendar.  We'd like to walk the archives
        for things that happen in this month and mark the dates
        accordingly.  After doing that we pass it to a formatting
        method which turns the thing into HTML.
        """
        config = self._request.get_configuration()
        data = self._request.get_data()
        entry_list = data["entry_list"]

        root = config["datadir"]
        baseurl = config.get("base_url", "")

        self._today = time.localtime()

        if len(entry_list) == 0:
            # if there are no entries, we shouldn't even try to
            # do something fancy.
            self._cal = ""
            return

        view = list(entry_list[0]["timetuple"])

        # this comes in as '', 2001, 2002, 2003, ...  so we can convert it
        # without an issue
        temp = data.get("pi_yr")
        if not temp:
            view[0] = int(self._today[0])
        else:
            view[0] = int(temp)

        # the month is a bit harder since it can come in as "08", "", or
        # "Aug" (in the example of August).
        temp = data.get("pi_mo")
        if temp and temp.isdigit():
            view[1] = int(temp)
        elif temp and tools.month2num.has_key(temp):
            view[1] = int(tools.month2num[temp])
        else:
            view[1] = int(self._today[1])

        self._view = view = tuple(view)

        # if we're looking at a specific day, we figure out what it is
        if data.get("pi_yr") and data.get("pi_mo") and data.get("pi_da"):
            if data["pi_mo"].isdigit():
                mon = data["pi_mo"]
            else:
                mon = tools.month2num[data["pi_mo"]]

            self._specificday = (int(data.get("pi_yr", self._today[0])),
                                 int(mon),
                                 int(data.get("pi_da", self._today[2])))

        archive_list = tools.walk(self._request, root)

        yearmonth = {}

        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)

            # if we already have an entry for this date, we skip to the
            # next one because we've already done this processing
            day = str(timetuple[2]).rjust(2)
            if self._entries.has_key(day):
                continue

            # add an entry for yyyymm so we can figure out next/previous
            year = str(timetuple[0])
            dayzfill = string.zfill(timetuple[1], 2)
            yearmonth[year + dayzfill] = time.strftime("%b", timetuple)

            # if the entry isn't in the year/month we're looking at with
            # the calendar, then we skip to the next one
            if timetuple[0:2] != view[0:2]:
                continue

            # mark the entry because it's one we want to show
            datepiece = time.strftime("%Y/%b/%d", timetuple)
            self._entries[day] = (baseurl + "/" + datepiece, day)

        # Set the first day of the week (Sunday by default)
        first = config.get('calendar_firstweekday', 6)
        calendar.setfirstweekday(first)

        # create the calendar
        cal = calendar.monthcalendar(view[0], view[1])

        # insert the days of the week
        cal.insert(0, calendar.weekheader(2).split())

        # figure out next and previous links by taking the dict of
        # yyyymm strings we created, turning it into a list, sorting
        # them, and then finding "today"'s entry.  then the one before
        # it (index-1) is prev, and the one after (index+1) is next.
        keys = yearmonth.keys()
        keys.sort()
        thismonth = time.strftime("%Y%m", view)

        # do some quick adjustment to make sure we didn't pick a
        # yearmonth that's outside the yearmonths of the entries we
        # know about.
        if thismonth in keys:
            index = keys.index(thismonth)
        elif len(keys) == 0 or keys[0] > thismonth:
            index = 0
        else:
            index = len(keys) - 1

        # build the prev link
        if index == 0 or len(keys) == 0:
            prev = None
        else:
            prev = ("%s/%s/%s" % (baseurl, keys[index-1][:4],
                                  yearmonth[keys[index-1]]),
                    "&lt;")

        # build the next link
        if index == len(yearmonth)-1 or len(keys) == 0:
            next = None
        else:
            next = ("%s/%s/%s" % (baseurl, keys[index+1][:4],
                                  yearmonth[keys[index+1]]),
                    "&gt;")

        # insert the month name and next/previous links
        cal.insert(0, [prev, time.strftime("%B %Y", view), next])

        self._cal = self.format_with_css(cal)
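# Illustrative sketch (not part of the plugin above): the nested-list
# shape handed to format_with_css.  calendar.monthcalendar() returns
# weeks of day numbers (0 for padding days), and the two insert(0, ...)
# calls above put the weekday header row and the [prev, "Month Year",
# next] row on top.
import calendar

calendar.setfirstweekday(6)                    # Sunday first, as above
cal = calendar.monthcalendar(2008, 1)          # an example month
cal.insert(0, calendar.weekheader(2).split())  # ['Su', 'Mo', 'Tu', ...]
cal.insert(0, [None, "January 2008", None])    # no prev/next links here
for row in cal:
    print row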