Esempio n. 1
0
def cb_filelist(args):
    """Select the proper file entries to be displayed

    cb_filelist first looks for a keyword that is being searched. If it doesn't
    find it, it relinquishes control to the default filelist handler. If it
    does find a keyword, it returns file objects for all blog entries matching
    that keyword.

    @param args: A dictionary containing the request object
    @type args: Dictionary
    @return: A list of FileEntry objects to be rendered
    """
    from Pyblosxom.entries.fileentry import FileEntry
    from meta import pathjoin

    request = args['request']
    data = request.getData()
    config = request.getConfiguration()
    # no keyword search in progress: defer to the default filelist handler
    # (the "in" operator replaces the deprecated dict.has_key)
    if 'keyword' not in data:
        return None

    # collect (mtime, entry) pairs for every entry whose keyword metadata
    # contains the keyword being searched for
    entrylist = []
    metadata = metasearch('keywords')
    for f in metadata:
        if data['keyword'] in parsekw(metadata[f]):
            path = pathjoin(config['datadir'], f)
            entry = FileEntry(request, path, data['root_datadir'])
            entrylist.append((entry._mtime, entry))

    # sort newest-first by mtime, then strip the mtime sort keys
    entrylist.sort()
    entrylist.reverse()
    return [x[1] for x in entrylist]
Esempio n. 2
0
def blogger_getRecentPosts(request,
                           appkey,
                           blogid,
                           username,
                           password,
                           numberOfPosts=5):
    """Get recent posts from a blog tree.

    @param request: the pyblosxom Request instance
    @param appkey: the Blogger API application key (unused here)
    @param blogid: the category path of the blog, "/"-prefixed
    @param username: the username to authenticate with
    @param password: the password to authenticate with
    @param numberOfPosts: the maximum number of posts to return
    @return: list of dicts, one per post
    """
    authenticate(request, username, password)
    config = request.getConfiguration()
    data = request.getData()
    from Pyblosxom.entries.fileentry import FileEntry
    from Pyblosxom import pyblosxom

    # figure out which entry parsers (and hence file extensions) are active
    exts = tools.run_callback("entryparser",
                              {'txt': pyblosxom.blosxom_entry_parser},
                              mappingfunc=lambda x, y: y,
                              defaultfunc=lambda x: x)

    data['extensions'] = exts

    filelist = tools.Walk(request,
                          os.path.join(config['datadir'], blogid[1:]),
                          pattern=re.compile(r'.*\.(' + '|'.join(exts.keys()) +
                                             ')-?$'),
                          recurse=1)

    dataList = []
    for ourfile in filelist:
        entry = FileEntry(request, ourfile, config['datadir'])
        dataList.append((entry._mtime, entry))

    # this sorts entries by mtime in reverse order.  entries that have
    # no mtime get sorted to the top.
    dataList.sort()
    dataList.reverse()

    # slicing replaces the original manual counter (which also returned
    # one post when numberOfPosts was 0); int() keeps string counts from
    # XML-RPC clients working
    entries = [x[1] for x in dataList][:int(numberOfPosts)]

    result = []
    for entry in entries:
        # read the raw entry file, closing the handle explicitly instead
        # of leaking it until garbage collection
        fp = open(entry['filename'])
        try:
            content = fp.read()
        finally:
            fp.close()
        result.append({
            'dateCreated': xmlrpclib.DateTime(entry['mtime']),
            'userid': '01',
            'postid': entry['filename'].replace(config['datadir'], ''),
            'content': content
        })
    return result
Esempio n. 3
0
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.  It takes the
    request object in and figures out which entries based on the
    default behavior that we want to show and generates a list of
    EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    # pick the files to render: a directory tree, a single file, or
    # nothing at all
    if data['bl_type'] == 'dir':
        file_list = tools.walk(request, data['root_datadir'],
                               int(config.get("depth", "0")))
    elif data['bl_type'] == 'file':
        file_list = [data['root_datadir']]
    else:
        file_list = []

    entry_list = [
        FileEntry(request, e, data["root_datadir"]) for e in file_list
    ]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr", ""):
        tmp_pi_mo = data.get("pi_mo", "")
        # build a YYYY[MM[DD]] prefix; month names are normalized to
        # their numeric form where possible
        date_str = "%s%s%s" % (data.get(
            "pi_yr", ""), tools.month2num.get(
                tmp_pi_mo, tmp_pi_mo), data.get("pi_da", ""))
        entry_list = [
            x for x in entry_list if time.strftime(
                "%Y%m%d%H%M%S", x["timetuple"]).startswith(date_str)
        ]

    # "is not None" (identity test) instead of "!= None": a plugin's
    # entry list may define __eq__, and PEP 8 mandates identity
    # comparison with None
    args = {"request": request, "entry_list": entry_list}
    entry_list = tools.run_callback("sortlist",
                                    args,
                                    donefunc=lambda x: x is not None,
                                    defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entry_list}
    entry_list = tools.run_callback("truncatelist",
                                    args,
                                    donefunc=lambda x: x is not None,
                                    defaultfunc=blosxom_truncate_list_handler)

    return entry_list
Esempio n. 4
0
def metaWeblog_getPost(request, postid, username, password):
    """Fetch a single post from the server.

    Part of the metaWeblog API.

    @param request: the pyblosxom Request instance
    @type  request: Request

    @param postid: the id of the post
    @type postid: string

    @param username: the username
    @type  username: string

    @param password: the password
    @type  password: string

    @returns the post whose id is postid
    @rtype dict
    """
    authenticate(request, username, password)
    config = request.getConfiguration()

    datadir = config['datadir']
    entry = FileEntry(request,
                      os.path.join(datadir, "%s.txt" % postid),
                      datadir)

    # assemble the pieces of the metaWeblog post structure
    permalink = "%s/%s/%s/%s#%s" % (config['base_url'], entry['yr'],
                                    entry['mo_num'], entry['da'], entry['fn'])
    post_id = re.sub(r'^/', '',
                     "%s/%s" % (entry['absolute_path'], entry['fn']))

    return {
        'permaLink': permalink,
        'title': entry['title'],
        'description': entry['body'],
        'postid': post_id,
        'categories': [entry['absolute_path']],
        'dateCreated': xmlrpclib.DateTime(entry['w3cdate']),
    }
Esempio n. 5
0
def cb_filelist(args):
    """Override the filelist callback to provide caching.

    If a rendered filelist for the requested path is already cached,
    return it.  Otherwise build the entry list the normal way, strip
    the unpicklable request references from each entry, cache the
    result, and return it.

    @param args: a dict containing the request object
    @return: a list of FileEntry objects
    """
    request = args['request']
    http = request.getHttp()
    config = request.getConfiguration()

    # the cache key is derived from the request path
    dirname = config.get("cache_dir", "/tmp/cache")
    path = http.get('PATH_INFO', '').lstrip('/') or 'root'
    f = pathjoin(dirname, path)
    c = get_cache(f)

    #if this request has already been cached, get it and return it
    incache = c.get('filelist', None)
    if incache:
        return incache

    #if no cache found, we need to load our data
    data = request.getData()
    req_type = data['bl_type']

    if req_type == 'file':
        filelist = [data['root_datadir']]
    elif req_type == 'dir':
        filelist = walkpath(request, data['root_datadir'], 1)
    else:
        filelist = []

    # build FileEntry objects in one pass (replaces the index-based
    # in-place rewrite loop)
    filelist = [FileEntry(request, fn, data['root_datadir'])
                for fn in filelist]
    filelist = sort(filelist)  #make this a sort callback?
    filelist = filelist[:config['num_entries']]

    #now cache the result
    for file_ in filelist:
        #we can't store the request (it breaks cPickle - why?), and we need
        #the metadata anyway, so generate it and remove the request link
        file_.getMetadata('')
        file_._request = None
    c['filelist'] = filelist
    put_cache(c, f)
    return filelist
Esempio n. 6
0
def cb_filelist(args):
    """Serve a static page when the URL matches the static trigger.

    Returns None when the URL isn't ours (so other handlers run), []
    when the requested page file doesn't exist, and a single-element
    list containing the rendered FileEntry otherwise.

    @param args: a dict containing the request object
    @type args: dict
    """
    req = args["request"]

    pyhttp = req.getHttp()
    data = req.getData()
    config = req.getConfiguration()

    # not our url -- let the default filelist handler deal with it
    if not pyhttp["PATH_INFO"].startswith("/" + TRIGGER):
        return

    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    staticdir = config.get("staticdir", config['datadir'])

    # normalize the configured dir to native separators with a trailing sep
    staticdir = staticdir.replace("/", os.sep)
    if not staticdir[-1] == os.sep:
        staticdir = staticdir + os.sep

    # everything after "/TRIGGER/" names the page
    page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER)+1:]

    if not page_name:
        return

    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")

    if not page_name:
        return

    # strip a trailing separator and collapse to the last path component
    if page_name[-1] == os.sep:
        page_name = page_name[:-1]
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]

    # if the page has a flavour, we use that.  otherwise
    # we default to the static flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]

    # we build our own config dict for the fileentry to kind of
    # fake it into loading this file correctly rather than
    # one of the entries.
    newdatadir = staticdir
    # config["datadir"] = newdatadir

    # figure out which registered entry extension the page file has
    ext = tools.what_ext(data["extensions"].keys(), staticdir + page_name)

    if not ext:
        return []

    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    filename = staticdir + page_name + "." + ext

    if not os.path.isfile(filename):
        return []

    fe = FileEntry(req, filename, staticdir)
    # now we evaluate python code blocks
    body = fe.getData()
    body = eval_python_blocks(req, body)
    body = "<!-- STATIC PAGE START -->\n\n" + body + "<!-- STATIC PAGE END -->\n"
    fe.setData(body)

    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "static"

    data['blog_title_with_path'] = config.get("blog_title", "") + " : " + fe.get("title", "")

    # set the datadir back
    config["datadir"] = datadir

    return [fe]
Esempio n. 7
0
def metaWeblog_getRecentPosts(request, blogid, username, password,
                              numberOfPosts):
    """
    Get the most recent posts
    Part of the metaWeblog API

    @param request: the pyblosxom Request instance
    @type  request: Request

    @param blogid: the id of the blog
    @type blogid: string

    @param username: the username
    @type  username: string

    @param password: the password
    @type  password: string

    @param numberOfPosts: the number of posts to retrieve
    @type  numberOfPosts: int

    @returns list of dicts, one per post
    @rtype list
    """
    authenticate(request, username, password)
    config = request.getConfiguration()

    filelist = tools.Walk(request,
                          config['datadir'],
                          int(config['depth']),
                          pattern=_allEntriesPattern(request))

    # pair each entry with its mtime so sorting puts newest first
    entryList = []
    for f in filelist:
        entry = FileEntry(request, f, config['datadir'])
        entryList.append((entry._mtime, entry))
    entryList.sort()
    entryList.reverse()

    # XML-RPC clients sometimes send the count as a string (or garbage);
    # catch only conversion errors rather than a bare except, which
    # would also swallow KeyboardInterrupt/SystemExit
    try:
        numberOfPosts = int(numberOfPosts)
    except (TypeError, ValueError):
        numberOfPosts = 5
    entryList = [x[1] for x in entryList][:numberOfPosts]

    posts = [{
        'permaLink':
        "%s/%s/%s/%s#%s" %
        (config['base_url'], x['yr'], x['mo_num'], x['da'], x['fn']),
        'title':
        x['title'],
        'description':
        x['body'],
        'postid':
        re.sub(r'^/', '', "%s/%s" % (x['absolute_path'], x['fn'])),
        'categories': [x['absolute_path']],
        'dateCreated':
        xmlrpclib.DateTime(x['w3cdate'])
    } for x in entryList]

    return posts
Esempio n. 8
0
def cb_filelist(args):
    """Serve a "pages" entry for trigger or frontpage URLs.

    Returns None when the URL is neither the pages trigger nor the
    frontpage (so other handlers run), [] when the page file doesn't
    exist, and a single-element list of the rendered FileEntry
    otherwise.

    @param args: a dict containing the request object
    @type args: dict
    """
    req = args["request"]

    pyhttp = req.get_http()
    data = req.get_data()
    config = req.get_configuration()
    page_name = None

    # neither a pages url nor the frontpage -- not ours
    if not (is_trigger(pyhttp, config) or is_frontpage(pyhttp, config)):
        return

    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    pagesdir = config["pagesdir"]

    # normalize pagesdir to native separators with a trailing sep
    pagesdir = pagesdir.replace("/", os.sep)
    if not pagesdir[-1] == os.sep:
        pagesdir = pagesdir + os.sep

    pathinfo = pyhttp.get("PATH_INFO", "")
    path, ext = os.path.splitext(pathinfo)
    # "/" and "/index.*" are served from the special "frontpage" page
    if pathinfo == "/" or path == "/index":
        page_name = "frontpage"
    else:
        page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER) + 1:]

    if not page_name:
        return

    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")

    if not page_name:
        return

    # strip a trailing separator and collapse to the last path component
    if page_name[-1] == os.sep:
        page_name = page_name[:-1]
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]

    # if the page has a flavour, we use that.  otherwise
    # we default to the default flavour.
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]

    # find which registered entry extension the page file uses
    ext = tools.what_ext(data["extensions"].keys(), pagesdir + page_name)

    if not ext:
        return []

    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    filename = pagesdir + page_name + "." + ext

    if not os.path.isfile(filename):
        return []

    fe = FileEntry(req, filename, pagesdir)
    # now we evaluate python code blocks
    body = fe.get_data()
    body = eval_python_blocks(req, body)
    body = ("<!-- PAGES PAGE START -->\n\n" +
            body +
            "<!-- PAGES PAGE END -->\n")
    fe.set_data(body)

    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "pages"

    data['blog_title_with_path'] = (
        config.get("blog_title", "") + " : " + fe.get("title", ""))

    # set the datadir back
    config["datadir"] = datadir

    return [fe]
Esempio n. 9
0
def cb_handle(args):
    """Accept and record incoming trackback pings.

    If PATH_INFO starts with the configured trackback trigger, parse
    the ping's form fields, run the "trackback_reject" callback chain,
    and -- unless rejected -- store the trackback as a comment on the
    target entry, replying with trackback XML either way.

    @param args: a dict of plugin arguments
    @type args: dict
    @return: 1 if the request was handled here, 0 otherwise
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    urltrigger = config.get('trackback_urltrigger', '/trackback')

    logger = tools.getLogger()

    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.getResponse()
        response.addHeader("Content-type", "text/xml")

        form = request.getForm()

        message = "A trackback must have at least a URL field (see http://www.sixapart.com/pronet/docs/trackback_spec )"

        # "url" is the only field the trackback spec requires
        if form.has_key("url"):
            from comments import decode_form
            encoding = config.get('blog_encoding', 'iso-8859-1')
            decode_form(form, encoding)
            import time
            # build a comment dict out of the ping's form fields
            cdict = {
                'title': form.getvalue('title', ''),
                'author': form.getvalue('blog_name', ''),
                'pubDate': str(time.time()),
                'link': form['url'].value,
                'source': form.getvalue('blog_name', ''),
                'description': form.getvalue('excerpt', ''),
                'ipaddress': pyhttp.get('REMOTE_ADDR', ''),
                'type': 'trackback'
            }
            argdict = {"request": request, "comment": cdict}
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x: x != 0)
            # plugins may return a bare code or a (code, message) pair
            if ((isinstance(reject, tuple) or isinstance(reject, list))
                    and len(reject) == 2):
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1

            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.pyblosxom import Request
            from Pyblosxom.pyblosxom import PyBlosxom

            datadir = config['datadir']

            from comments import writeComment
            try:
                import os
                # map the trailing part of the url onto an entry file
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                # Format Author
                cdict['author'] = 'Trackback from %s' % form.getvalue(
                    'blog_name', '')
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message

        else:
            logger.error(message)
            print >> response, tb_bad_response % message

        # no further handling is needed
        return 1
    else:
        return 0
Esempio n. 10
0
def cb_filelist(args):
    """Serve a static page when the URL matches the static trigger.

    Returns None when the URL isn't ours (so other handlers run), []
    when the requested page file doesn't exist, and a single-element
    list containing the rendered FileEntry otherwise.
    """
    req = args["request"]

    pyhttp = req.getHttp()
    data = req.getData()
    config = req.getConfiguration()

    trigger_prefix = "/" + TRIGGER
    if not pyhttp["PATH_INFO"].startswith(trigger_prefix):
        return

    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']

    # normalize the configured dir to native separators, trailing sep
    staticdir = config.get("staticdir", config['datadir']).replace("/", os.sep)
    if not staticdir.endswith(os.sep):
        staticdir += os.sep

    # everything after "/TRIGGER/" names the page
    page_name = pyhttp["PATH_INFO"][len(trigger_prefix) + 1:]
    if not page_name:
        return

    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")
    if not page_name:
        return

    # strip a trailing separator and keep only the last path component
    if page_name.endswith(os.sep):
        page_name = page_name[:-1]
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]

    # a flavour extension on the page overrides the static flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]

    ext = tools.what_ext(data["extensions"].keys(), staticdir + page_name)
    if not ext:
        return []

    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'

    filename = staticdir + page_name + "." + ext
    if not os.path.isfile(filename):
        return []

    fe = FileEntry(req, filename, staticdir)
    # evaluate embedded python code blocks and mark the static span
    rendered = eval_python_blocks(req, fe.getData())
    fe.setData("<!-- STATIC PAGE START -->\n\n" + rendered +
               "<!-- STATIC PAGE END -->\n")

    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "static"

    data['blog_title_with_path'] = (config.get("blog_title", "") + " : " +
                                    fe.get("title", ""))

    # restore the datadir we saved at the top
    config["datadir"] = datadir

    return [fe]
Esempio n. 11
0
def cb_handle(args):
    """Accept a comment posted via the CommentAPI (an RSS item on stdin).

    If PATH_INFO starts with the configured commentAPI trigger, the
    trailing path selects the entry being commented on; the RSS item on
    standard input is parsed and written as a comment on that entry.

    @param args: a dict of plugin arguments
    @type args: dict
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    urltrigger = config.get('commentAPI_urltrigger','/commentAPI')

    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        try:
            from Pyblosxom.entries.fileentry import FileEntry
            import os, sys
            # everything after the trigger names the target entry
            pi = path_info.replace(urltrigger,'')
            if pi == '':
                sys.exit("<html><body>CommentAPI.cgi expects to receive an RSS item on standard input</body></html>")

            datadir = config['datadir']
            path = os.path.join(datadir, pi[1:])
            data = request.getData()
            filename = ''
            ext = tools.what_ext(data['extensions'].keys(),path)
            filename = os.path.normpath('%s.%s' % (path, ext))
            entry = FileEntry(request, filename, datadir )
            data = {}
            data['entry_list'] = [ entry ]

            # the comment itself arrives as an RSS <item> on stdin
            commentString = sys.stdin.read()
            if commentString == None:
                sys.exit("<html><body>CommentAPI expects to receive an RSS item on standard input</body></html>")
            try:
                from xml.dom.minidom import parseString
                from xml.parsers.expat import ExpatError
                commentDOM = parseString(commentString)
            except ExpatError, ee:
                sys.exit("<html><body>The RSS Item you supplied could not be parsed.\nThe error occured at line %d, column %d</body></html>" % (ee.lineno,ee.offset))

            def dictFromDOM(dom, data, field, default=''):
                """Fill in data[field] from the text of the single <field>
                element in dom, or with default if it isn't present
                exactly once.
                """
                value = dom.getElementsByTagName(field)
                if len(value) == 1:
                    data[field] = value[0].firstChild.data
                else:
                    data[field] = default

            # use dictFromDOM to fill in a dict with the stuff in the comment
            cdict = {}
            dictFromDOM(commentDOM, cdict, 'title')
            dictFromDOM(commentDOM, cdict, 'author')
            dictFromDOM(commentDOM, cdict, 'link')
            dictFromDOM(commentDOM, cdict, 'source')
            # force an integer data stamp -- not in keeping with RFC 822,
            # but neither is RSS
            import time
            cdict['pubDate'] = str(time.time())
            dictFromDOM(commentDOM, cdict, 'description')

            # must be done after plugin initialization
            from comments import writeComment
            # write the comment (in the dict)
            writeComment(request, config, data, cdict, config['blog_encoding'])

            print "Content-Type: text/plain\n"
            print "OK"
        except OSError:
            print "Content-Type: text/plain\n"
            print "An Error Occurred"
Esempio n. 12
0
def cb_handle(args):
    """Accept incoming trackback pings (print-based CGI variant).

    If PATH_INFO starts with the configured trackback trigger, read the
    ping fields out of the CGI form, store them as a comment on the
    target entry, and print a trackback XML response to stdout.

    @param args: a dict of plugin arguments
    @type args: dict
    @return: 1 if the request was handled here, 0 otherwise
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    urltrigger = config.get('trackback_urltrigger', '/trackback')

    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        print "Content-type: text/xml"
        print

        form = cgi.FieldStorage()

        message = "not trackback"
        # NOTE(review): requires all four fields, although the trackback
        # spec only mandates "url"
        if form.has_key("title") and form.has_key("excerpt") and \
               form.has_key("url") and form.has_key("blog_name"):
            import time
            # build the comment dict out of the ping's form fields
            cdict = { 'title': form['title'].value, \
                      'author': 'Trackback from %s' % form['blog_name'].value, \
                      'pubDate' : str(time.time()), \
                      'link' : form['url'].value, \
                      'source' : form['blog_name'].value, \
                      'description' : form['excerpt'].value }
            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.Request import Request
            from Pyblosxom.pyblosxom import PyBlosxom

            datadir = config['datadir']

            from comments import writeComment
            try:
                import os
                # map the trailing part of the url onto an entry file
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                writeComment(config, data, cdict)
                print tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                tools.log(message)
                print tb_bad_response % message

        else:
            tools.log(message)
            print tb_bad_response % message

        import sys
        sys.stdout.flush()
        # no further handling is needed
        return 1
    else:
        return 0
Esempio n. 13
0
def cb_filelist(args):
    """Serve a "pages" entry for trigger or frontpage URLs.

    Returns None when the URL is neither the pages trigger nor the
    frontpage, [] when the page file doesn't exist, and a single-element
    list of the rendered FileEntry otherwise.
    """
    req = args["request"]

    pyhttp = req.get_http()
    data = req.get_data()
    config = req.get_configuration()
    page_name = None

    if not (is_trigger(pyhttp, config) or is_frontpage(pyhttp, config)):
        return

    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']

    # normalize pagesdir to native separators with a trailing sep
    pagesdir = config["pagesdir"].replace("/", os.sep)
    if not pagesdir.endswith(os.sep):
        pagesdir += os.sep

    pathinfo = pyhttp.get("PATH_INFO", "")
    path, ext = os.path.splitext(pathinfo)
    # "/" and "/index.*" are served from the special "frontpage" page
    if pathinfo == "/" or path == "/index":
        page_name = "frontpage"
    else:
        page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER) + 1:]

    if not page_name:
        return

    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")
    if not page_name:
        return

    # strip a trailing separator and keep only the last path component
    if page_name.endswith(os.sep):
        page_name = page_name[:-1]
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]

    # a flavour extension on the page name overrides the default flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]

    ext = tools.what_ext(data["extensions"].keys(), pagesdir + page_name)
    if not ext:
        return []

    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'

    filename = pagesdir + page_name + "." + ext
    if not os.path.isfile(filename):
        return []

    fe = FileEntry(req, filename, pagesdir)
    # evaluate embedded python code blocks and mark the page span
    rendered = eval_python_blocks(req, fe.get_data())
    fe.set_data("<!-- PAGES PAGE START -->\n\n" + rendered +
                "<!-- PAGES PAGE END -->\n")

    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "pages"

    data['blog_title_with_path'] = (config.get("blog_title", "") + " : " +
                                    fe.get("title", ""))

    # restore the datadir we saved at the top
    config["datadir"] = datadir

    return [fe]
Esempio n. 14
0
def cb_filelist(args):
    """
    This handles kicking off wbgwiki functionality if we see a
    url that we handle.

    Returns None when the url isn't ours (so other handlers run), []
    when the wiki page is missing or escapes the wiki directory, and a
    one-element list of the rendered FileEntry otherwise.
    """
    req = args["request"]

    pyhttp = req.getHttp()
    config = req.getConfiguration()
    pathinfo = pyhttp["PATH_INFO"]

    # not a wiki url -- let other handlers have it
    if not pathinfo.startswith("/" + TRIGGER):
        return

    logger = tools.getLogger()

    data = req.getData()
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    wikidir = config.get("wikidir", config['datadir'])

    # convert the / to os.sep so that we can use os.path stuff.
    wikidir = wikidir.replace("/", os.sep)
    if not wikidir.endswith(os.sep):
        wikidir = wikidir + os.sep

    # everything after "/TRIGGER/" names the page
    page_name = pathinfo[len("/" + TRIGGER) + 1:]

    if not page_name:
        return

    page_name = page_name.replace("/", os.sep)

    if not page_name:
        return

    if page_name.endswith(os.sep):
        page_name = page_name[:-1]

    # if the page has a flavour, we use that.  otherwise
    # we default to the wiki flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]

    # wikifile should hold the absolute path on the file system to
    # the wiki file we're looking at.  if it's in a parent directory
    # of wikidir, then we abort.
    wikifile = os.path.normpath(os.path.join(wikidir, page_name))
    if not wikifile.startswith(wikidir):
        logger.info("wiki file requested '%s' is not in wikidir." % wikifile)
        return []

    # we build our own config dict for the fileentry to kind of
    # fake it into loading this file correctly rather than
    # one of the entries.
    newdatadir = wikidir

    # figure out which registered entry extension the wiki file has
    ext = tools.what_ext(data["extensions"].keys(), wikifile)

    if not ext:
        logger.info("wiki file '%s' does not exist." % wikifile)
        return []

    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    wikifile = wikifile + "." + ext

    if not os.path.isfile(wikifile):
        return []

    fe = FileEntry(req, wikifile, wikidir)

    # now we evaluate python code blocks
    body = fe.getData()
    body = eval_python_blocks(req, body)
    body = "<!-- STATIC PAGE START -->\n\n%s\n<!-- STATIC PAGE END -->\n" % body

    # now we evaluate for wikilinks
    body = connect_links(config["base_url"], data["extensions"].keys(),
                         wikidir, body)

    fe.setData(body)

    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "wiki"

    data['blog_title_with_path'] = "%s : %s" % \
                   (config.get("blog_title", ""), fe.get("title_escaped", ""))

    # set the datadir back
    config["datadir"] = datadir

    return [fe]
Esempio n. 15
0
def cb_filelist(args):
    """Serve a wiki page when the URL starts with the wiki trigger.

    Returns None if the URL isn't ours, [] if the requested page is
    missing or falls outside the wiki directory, and a one-element list
    with the rendered FileEntry otherwise.
    """
    req = args["request"]

    pyhttp = req.getHttp()
    config = req.getConfiguration()
    pathinfo = pyhttp["PATH_INFO"]

    trigger_prefix = "/" + TRIGGER
    if not pathinfo.startswith(trigger_prefix):
        return

    logger = tools.getLogger()

    data = req.getData()
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']

    # convert the / to os.sep so that we can use os.path stuff.
    wikidir = config.get("wikidir", config['datadir']).replace("/", os.sep)
    if not wikidir.endswith(os.sep):
        wikidir += os.sep

    # everything after "/TRIGGER/" names the page
    page_name = pathinfo[len(trigger_prefix) + 1:]
    if not page_name:
        return

    page_name = page_name.replace("/", os.sep)
    if not page_name:
        return

    if page_name.endswith(os.sep):
        page_name = page_name[:-1]

    # a flavour extension on the page name overrides the wiki flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]

    # refuse any path that normalizes to outside of wikidir
    wikifile = os.path.normpath(os.path.join(wikidir, page_name))
    if not wikifile.startswith(wikidir):
        logger.info("wiki file requested '%s' is not in wikidir." % wikifile)
        return []

    ext = tools.what_ext(data["extensions"].keys(), wikifile)
    if not ext:
        logger.info("wiki file '%s' does not exist." % wikifile)
        return []

    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'

    wikifile = wikifile + "." + ext
    if not os.path.isfile(wikifile):
        return []

    fe = FileEntry(req, wikifile, wikidir)

    # evaluate python code blocks, wrap the body in the static page
    # markers, then turn wikiwords into links
    body = eval_python_blocks(req, fe.getData())
    body = "<!-- STATIC PAGE START -->\n\n%s\n<!-- STATIC PAGE END -->\n" % body
    body = connect_links(config["base_url"],
                         data["extensions"].keys(),
                         wikidir,
                         body)

    fe.setData(body)

    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "wiki"

    data['blog_title_with_path'] = "%s : %s" % (
        config.get("blog_title", ""), fe.get("title_escaped", ""))

    # restore the datadir we saved at the top
    config["datadir"] = datadir

    return [fe]