Example #1
    def gen_linear_archive(self):
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        archives = {}
        archive_list = tools.get_entries(config, root)
        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)

        template = config.get('archive_template',
                    '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')
        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)
            timedict = {}
            for x in ["B", "b", "m", "Y", "y"]:
                timedict[x] = time.strftime("%" + x, timetuple)

            fulldict.update(timedict)
            if (timedict['Y'] + timedict['m']) not in archives:
                archives[timedict['Y'] + timedict['m']] = (template % fulldict)

        result = []
        for key in sorted(archives.keys(), reverse=True):
            result.append(archives[key])
        self._archives = '\n'.join(result)
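The archive_template above is expanded with plain %-style mapping substitution against the merged config/data/strftime dictionary. A minimal standalone sketch of that expansion, using made-up values for base_url and the date parts, looks like this:

# Hypothetical values: base_url normally comes from the config/data dicts,
# and Y/b come from time.strftime() on the entry's timestamp.
template = '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />'
fulldict = {'base_url': 'http://example.com/blog', 'Y': '2013', 'b': 'Jan'}
print(template % fulldict)
# <a href="http://example.com/blog/2013/Jan">2013-Jan</a><br />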
Example #2
def cmd_buildtags(command, argv):
    """Command for building the tags index."""
    cfg = import_config()
    datadir = cfg['datadir']
    sep = cfg.get('tags_separator', ',')
    tagsfile = get_tagsfile(cfg)

    from douglas import tools
    from douglas.app import Douglas, initialize
    from douglas.entries import fileentry

    # Build a douglas object, initialize it, and run the start
    # callback.  This gives entry parsing related plugins a chance to
    # get their stuff together so that they work correctly.
    initialize(cfg)
    p = Douglas(cfg, {})
    p.initialize()
    req = p.get_request()
    tools.run_callback("start", {"request": req})

    # Grab all the entries in the datadir
    entrylist = [fileentry.FileEntry(req, e, datadir)
                 for e in tools.get_entries(cfg, datadir)]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
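The index handed to savefile() is just a mapping of tag to the filenames of the entries carrying that tag. Assuming two entries whose #tags lines read "python, blog" and "python", the structure built by the loop above would look roughly like this (paths are made up):

tags_to_files = {
    'python': ['/home/joe/blog/entries/first.txt',
               '/home/joe/blog/entries/second.txt'],
    'blog': ['/home/joe/blog/entries/first.txt'],
}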
Example #3
    def test_fine(self):
        yesterday = time.time() - (60 * 60 * 24)

        self.generate_entry('test1.txt', yesterday)
        self.generate_entry('ignore1/test_ignore1.txt', yesterday)
        self.generate_entry('ignore2/test_ignore2.txt', yesterday)

        entries = get_entries(self.config, self.datadir)

        eq_(len(entries), 3)
Example #4
    def test_future(self):
        tomorrow = time.time() + (60 * 60 * 24)
        yesterday = time.time() - (60 * 60 * 24)

        self.generate_entry('test1.txt', yesterday)
        self.generate_entry('ignore1/test_ignore1.txt', yesterday)
        self.generate_entry('ignore2/test_ignore2.txt', tomorrow)

        entries = get_entries(self.config, self.datadir)

        eq_(len(entries), 2)
Example #5
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.  It takes the
    request object in and figures out which entries based on the
    default behavior that we want to show and generates a list of
    EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    if data['bl_type'] == 'entry_list':
        filelist = tools.get_entries(
            config, data['root_datadir'], int(config['depth']))
    elif data['bl_type'] == 'entry':
        filelist = [data['root_datadir']]
    else:
        filelist = []

    entrylist = [FileEntry(request, e, data["root_datadir"]) for e in filelist]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr"):
        datestr = "%s%s%s" % (data["pi_yr"],
                              data.get("pi_mo", ""),
                              data.get("pi_da", ""))
        entrylist = [
            x for x in entrylist
            if (time.strftime("%Y%m%d%H%M%S", x["timetuple"])
                .startswith(datestr))]

    args = {"request": request, "entry_list": entrylist}
    entrylist = tools.run_callback("sortlist",
                                   args,
                                   donefunc=lambda x: x is not None,
                                   defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entrylist}
    entrylist = tools.run_callback("truncatelist",
                                   args,
                                   donefunc=lambda x: x is not None,
                                   defaultfunc=blosxom_truncate_list_handler)

    return entrylist
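Both run_callback() chains at the end follow the usual plugin convention: a plugin supplies a cb_<chain> function that receives the args dict and returns a non-None entry list to satisfy donefunc, otherwise the default handler runs. A minimal, hypothetical sortlist plugin might look like the sketch below (it assumes, as the code above does, that each entry exposes a "timetuple" item):

def cb_sortlist(args):
    # args carries the Request object and the entry list built by the
    # file list handler above.
    entry_list = args['entry_list']
    # Sort newest-first by each entry's time tuple and hand the list
    # back so the callback chain stops here.
    entry_list.sort(key=lambda e: e['timetuple'], reverse=True)
    return entry_list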
Example #6
def cmd_category_to_tags(command, argv):
    """Converts the category to tags metadata for all entries.

    It adds the tags line as the second line and maintains the mtime
    for the file.

    """
    cfg = import_config()

    datadir = cfg['datadir']
    sep = cfg.get("tags_separator", ",")

    from douglas import tools
    from douglas.app import initialize

    initialize(cfg)
    filelist = tools.get_entries(cfg, datadir)

    if not datadir.endswith(os.sep):
        datadir = datadir + os.sep

    for mem in filelist:
        print "working on %s..." % mem

        category = os.path.dirname(mem)[len(datadir):]
        tags = category.split(os.sep)
        print "   adding tags %s" % tags
        tags = "#tags %s\n" % (sep.join(tags))

        atime, mtime = os.stat(mem)[7:9]

        with open(mem, 'r') as fp:
            data = fp.readlines()

        data.insert(1, tags)

        with open(mem, 'w') as fp:
            fp.write("".join(data))

        os.utime(mem, (atime, mtime))

    return 0
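Concretely, the loop turns an entry's category path into a #tags line and splices it in right after the title line. A small self-contained sketch of that transformation, using a made-up entry under dev/python/ and the default "," separator, is:

import os

# Hypothetical entry at <datadir>/dev/python/firstpost.txt
data = ['First post title\n', 'This is the body.\n']
category = os.path.join('dev', 'python')      # path relative to datadir
tags = '#tags %s\n' % ','.join(category.split(os.sep))
data.insert(1, tags)
# data is now:
#   ['First post title\n', '#tags dev,python\n', 'This is the body.\n']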
Example #7
    def categorydata(self):
        if self._categorydata is None:
            config = self.request.get_configuration()
            root = config["datadir"]

            # Build the list of all entries in the datadir
            entry_list = tools.get_entries(config, root)

            # Peel off the root dir from the list of entries
            entry_list = [mem[len(root) + 1:] for mem in entry_list]

            # Map categories to counts.
            category_map = {}
            for mem in entry_list:
                mem = os.path.dirname(mem)
                for par in parents(mem):
                    category_map[par] = category_map.get(par, 0) + 1

            self._categorydata = sorted(category_map.items())
        return self._categorydata
Example #8
def cmd_persistdate(command, argv):
    from douglas.cmdline import import_config
    config = import_config()

    datadir = config.py.get('datadir')

    if not datadir:
        raise ValueError('config.py has no datadir property.')

    from douglas import tools
    from douglas.app import Douglas

    p = Douglas(config.py, {})
    p.initialize()
    req = p.get_request()
    tools.run_callback('start', {'request': req})

    filelist = tools.get_entries(config, datadir)
    print '%d files' % len(filelist)
    for fn in filelist:
        with open(fn, 'r') as fp:
            lines = fp.readlines()
        try:
            metadata = get_metadata(lines)
        except IndexError as exc:
            print '%s errored out: %s' % (fn, exc)
            continue

        if 'published' in metadata:
            print '%s already has metadata...' % fn
            continue

        print 'working on %s...' % fn
        timetuple = tools.filestat(req, fn)
        published = time.strftime('%Y-%m-%d %H:%M:%S', timetuple)
        lines.insert(1, '#published %s\n' % published)
        with open(fn, 'w') as fp:
            fp.write(''.join(lines))
Example #9
    def run_compile(self, incremental=False):
        """Compiles the blog into an HTML site.

        This will go through all possible things in the blog and
        compile the blog to the ``compiledir`` specified in the config
        file.

        This figures out all the possible ``path_info`` settings and
        calls ``self.run()`` a bazillion times saving each file.

        :param incremental: Whether (True) or not (False) to compile
            incrementally. If we're incrementally compiling, then only
            the urls that are likely to have changed get re-compiled.

        """
        self.initialize()

        cfg = self._request.get_configuration()
        compiledir = cfg['compiledir']
        datadir = cfg['datadir']

        if not compiledir:
            print 'Error: You must set compiledir in your config file.'
            return 0

        print 'Compiling to "{0}".'.format(compiledir)
        if incremental:
            print 'Incremental is set.'
        print ''

        themes = cfg['compile_themes']
        index_themes = cfg['compile_index_themes']

        dayindexes = cfg['day_indexes']
        monthindexes = cfg['month_indexes']
        yearindexes = cfg['year_indexes']

        renderme = []
        dates = {}
        categories = {}

        # first we handle entries and categories
        listing = tools.get_entries(cfg, datadir)

        for mem in listing:
            # Skip files that have extensions we don't know what to do
            # with.
            ext = os.path.splitext(mem)[1].lstrip('.')
            if ext not in cfg['extensions']:
                continue

            # Get the mtime of the entry.
            mtime = time.mktime(tools.filestat(self._request, mem))

            # remove the datadir from the front and the bit at the end
            mem = mem[len(datadir):mem.rfind('.')]

            # This is the compiled file filename.
            fn = os.path.normpath(compiledir + mem)

            if incremental:
                # If we're incrementally rendering, we check the mtime
                # for the compiled file for one of the themes. If the entry
                # is more recent than the compiled version, we recompile.
                # Otherwise we skip it.
                try:
                    smtime = os.stat(fn + '.' + themes[0])[8]
                    if mtime <= smtime:
                        continue

                except (IOError, OSError):
                    pass

            # Figure out category indexes to re-render.
            temp = os.path.dirname(mem).split(os.sep)
            for i in range(len(temp)+1):
                p = os.sep.join(temp[0:i])
                categories[p] = 0

            # Figure out year/month/day indexes to re-render.
            mtime = time.localtime(mtime)
            year = time.strftime('%Y', mtime)
            month = time.strftime('%m', mtime)
            day = time.strftime('%d', mtime)

            if yearindexes:
                dates[year] = 1

            if monthindexes:
                dates[year + '/' + month] = 1

            if dayindexes:
                dates[year + '/' + month + '/' + day] = 1

            # Toss each theme for this entry in the render queue.
            for f in themes:
                renderme.append((mem + '.' + f, ''))

        print '- Found {0} entry(es) ...'.format(len(renderme))

        if categories:
            categories = sorted(categories.keys())

            # if they have stuff in their root category, it'll add a "/"
            # to the category list and we want to remove that because it's
            # a duplicate of "".
            if '/' in categories:
                categories.remove('/')

            print '- Found {0} category index(es) ...'.format(len(categories))

            for mem in categories:
                mem = os.path.normpath(mem + '/index.')
                for f in index_themes:
                    renderme.append((mem + f, ''))

        if dates:
            dates = ['/' + d for d in sorted(dates.keys())]

            print '- Found {0} date index(es) ...'.format(len(dates))

            for mem in dates:
                mem = os.path.normpath(mem + '/index.')
                for f in index_themes:
                    renderme.append((mem + f, ''))

        additional_stuff = cfg['compile_urls']
        if additional_stuff:
            print '- Found {0} arbitrary url(s) ...'.format(
                len(additional_stuff))

            for mem in additional_stuff:
                if mem.find('?') != -1:
                    url = mem[:mem.find('?')]
                    query = mem[mem.find('?')+1:]
                else:
                    url = mem
                    query = ''

                renderme.append((url, query))

        # Pass the complete render list to all the plugins via
        # cb_compile_filelist and they can add to the filelist any
        # (url, query) tuples they want rendered.
        total = len(renderme)
        tools.run_callback('compile_filelist',
                           {'request': self._request,
                            'filelist': renderme,
                            'themes': themes,
                            'incremental': incremental})

        renderme = sorted(set(renderme))
        print '- Found {0} url(s) specified by plugins ...'.format(
            len(renderme) - total)

        print ''
        print 'Compiling {0} url(s) total.'.format(len(renderme))
        print ''

        print 'Rendering files ...'
        for url, q in renderme:
            url = url.replace(os.sep, '/')
            print '   Rendering {0} ...'.format(url)
            tools.render_url_statically(dict(cfg), url, q)

        # We're done, clean up
        self.cleanup()
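As the comment before the compile_filelist callback notes, plugins can extend the render queue by appending (url, query) tuples to the filelist they are handed. A minimal, hypothetical plugin hook would be:

def cb_compile_filelist(args):
    # args['filelist'] is the renderme list built above; anything a
    # plugin appends here gets compiled along with the rest.  The feed
    # url is purely illustrative.
    args['filelist'].append(('/index.rss', ''))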