Example #1
def readfile(filename, request):
    entryData = {}
    d = open(filename).read()

    # Grab title and body.
    title = d.split('\n')[0]
    body = d[len(title):]

    # Grab textile configuration.
    config = request.getConfiguration()
    head_offset = config.get('txtl_head_offset', 0)
    validate = config.get('txtl_validate', 0)
    output = config.get('txtl_output', 'ascii')
    encoding = config.get('txtl_encoding', 'latin-1')

    body = textile(body,
                   head_offset=head_offset,
                   validate=validate,
                   output=output,
                   encoding=encoding)

    entryData = {'title': title, 'body': body}

    # Call the postformat callbacks
    tools.run_callback('postformat', {
        'request': request,
        'entry_data': entryData
    })

    return entryData
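The tools.run_callback('postformat', ...) call above hands the {'request': ..., 'entry_data': ...} dict to every plugin that registers a postformat callback. As a minimal sketch (the callback body and footer text are invented for illustration, not taken from any example on this page), such a plugin-side callback might look like:

def cb_postformat(args):
    # args is the dict built by readfile() above: 'request' and 'entry_data'
    entry_data = args["entry_data"]

    # the same dict travels down the whole callback chain, so mutating it
    # here is how a postformat plugin changes what readfile() returns
    entry_data["body"] += '\n<p class="footer">hypothetical footer</p>'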
Example #2
File: txtl.py Project: hylom/fusuma
def readfile(filename, request):
    entryData = {}
    d = open(filename).read()

    # Grab title and body.
    title = d.split('\n')[0]
    body  = d[len(title):]

    # Grab textile configuration.
    config = request.getConfiguration()
    head_offset = config.get('txtl_head_offset', 0)
    validate    = config.get('txtl_validate', 0)
    output      = config.get('txtl_output', 'ascii')
    encoding    = config.get('txtl_encoding', 'latin-1')
    
    body = textile(body, head_offset=head_offset, validate=validate, output=output, encoding=encoding)

    entryData = {'title': title,
                 'body': body}

    # Call the postformat callbacks
    tools.run_callback('postformat',
            {'request': request,
             'entry_data': entryData})
    
    return entryData
Example #3
def readfile(filename, request):
    entry_data = {}
    lines = open(filename).readlines()

    if len(lines) == 0:
        return {"title": "", "body": ""}

    title = lines.pop(0).strip()

    # absorb meta data
    while lines and lines[0].startswith("#"):
        meta = lines.pop(0)
        # remove the hash
        meta = meta[1:].strip()
        meta = meta.split(" ", 1)
        # if there's no value, we append a 1
        if len(meta) == 1:
            meta.append("1")
        entry_data[meta[0].strip()] = meta[1].strip()

    body = parse(''.join(lines), request)
    entry_data["title"] = title
    entry_data["body"] = body

    # Call the postformat callbacks
    tools.run_callback('postformat', {'request': request,
                                      'entry_data': entry_data})
    return entry_data
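To make the metadata loop concrete, here is a self-contained walk-through using a made-up entry file: the first line becomes the title, each '#key value' line becomes a metadata entry, and a bare '#key' line gets the value "1".

# hypothetical entry contents, already split into lines
lines = ["My first post\n",
         "#tags python, blogging\n",
         "#draft\n",
         "The body of the post starts here.\n"]

entry_data = {}
title = lines.pop(0).strip()
while lines and lines[0].startswith("#"):
    meta = lines.pop(0)[1:].strip().split(" ", 1)
    if len(meta) == 1:
        meta.append("1")
    entry_data[meta[0].strip()] = meta[1].strip()

# entry_data is now {'tags': 'python, blogging', 'draft': '1'} and
# ''.join(lines) is the raw body that gets handed to parse()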
Example #4
def readfile(filename, request):
    logger = tools.get_logger()
    logger.info("Calling readfile for %s", filename)
    entry_data = {}
    lines = open(filename).readlines()

    if len(lines) == 0:
        return {"title": "", "body": ""}

    title = lines.pop(0).strip()

    # absorb meta data
    while lines and lines[0].startswith("#"):
        meta = lines.pop(0)
        # remove the hash
        meta = meta[1:].strip()
        meta = meta.split(" ", 1)
        # if there's no value, we append a 1
        if len(meta) == 1:
            meta.append("1")
        entry_data[meta[0].strip()] = meta[1].strip()

    body = parse("".join(lines), request)
    entry_data["title"] = title
    entry_data["body"] = body

    # Call the postformat callbacks
    tools.run_callback("postformat", {"request": request,
                                      "entry_data": entry_data})
    logger.info("Returning %r", entry_data)
    return entry_data
Example #5
def readfile(filename, request):
    logger = tools.get_logger()
    logger.info("Calling readfile for %s", filename)
    entry_data = {}
    lines = open(filename).readlines()

    if len(lines) == 0:
        return {"title": "", "body": ""}

    title = lines.pop(0).strip()

    # absorb meta data
    while lines and lines[0].startswith("#"):
        meta = lines.pop(0)
        # remove the hash
        meta = meta[1:].strip()
        meta = meta.split(" ", 1)
        # if there's no value, we append a 1
        if len(meta) == 1:
            meta.append("1")
        entry_data[meta[0].strip()] = meta[1].strip()

    body = parse("".join(lines), request)
    entry_data["title"] = title
    entry_data["body"] = body

    # Call the postformat callbacks
    tools.run_callback("postformat", {
        "request": request,
        "entry_data": entry_data
    })
    logger.info("Returning %r", entry_data)
    return entry_data
Example #6
def readfile(filename, request):
    """
    Reads a file and passes it to L{parse} to format in textile
   
    @param filename: the file in question
    @type filename: string
    @param request: The request object
    @type request: L{Pyblosxom.pyblosxom.Request} object
    @returns: Data of the entry
    @rtype: dict
    """
    entryData = {}
    d = open(filename).read()

    # Grab title and body.
    title = d.split('\n')[0]
    body = d[len(title):]

    # Parse content
    body = parse(body, request)

    entryData = {'title': title, 'body': body}

    # Call the postformat callbacks
    tools.run_callback('postformat', {
        'request': request,
        'entry_data': entryData
    })

    return entryData
Example #7
    def run(self, static=False):
        """This is the main loop for Pyblosxom.  This method will run
        the handle callback to allow registered handlers to handle the
        request.  If nothing handles the request, then we use the
        ``default_blosxom_handler``.

        :param static: True if Pyblosxom should execute in "static rendering
                       mode" and False otherwise.
        """
        self.initialize()

        # buffer the input stream in a StringIO instance if dynamic
        # rendering is used.  This is done to have a known/consistent
        # way of accessing incoming data.
        if not static:
            self.get_request().buffer_input_stream()

        # run the start callback
        tools.run_callback("start", {'request': self._request})

        # allow anyone else to handle the request at this point
        handled = tools.run_callback("handle", {'request': self._request},
                                     mappingfunc=lambda x, y: x,
                                     donefunc=lambda x: x)

        if not handled == 1:
            blosxom_handler(self._request)

        # do end callback
        tools.run_callback("end", {'request': self._request})

        # we're done, clean up.
        # only call this if we're not in static rendering mode.
        if not static:
            self.cleanup()
Example #8
    def run_callback(self, callback="help"):
        """This method executes the start callback (initializing
        plugins), executes the requested callback, and then executes
        the end callback.

        This is useful for scripts outside of Pyblosxom that need to
        do things inside of the Pyblosxom framework.

        If you want to run a callback from a plugin, use
        ``tools.run_callback`` instead.

        :param callback: the name of the callback to execute.

        :returns: the results of the callback.
        """
        self.initialize()

        # run the start callback
        tools.run_callback("start", {'request': self._request})

        # invoke all callbacks for the 'callback'
        handled = tools.run_callback(callback,
                                     {'request': self._request},
                                     mappingfunc=lambda x,y:x,
                                     donefunc=lambda x:x)

        # do end callback
        tools.run_callback("end", {'request': self._request})

        return handled
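As the docstring says, this method is aimed at scripts that live outside of Pyblosxom. A minimal sketch of such a script, assuming a config module exposing a 'py' dict like the one imported in the buildtags examples further down, and a hypothetical callback name:

from Pyblosxom.pyblosxom import Pyblosxom

import config  # assumed to provide config.py, as in the buildtags examples

p = Pyblosxom(config.py, {})
# runs the start callback, then the named chain, then the end callback
result = p.run_callback("myplugin")  # "myplugin" is a hypothetical callback name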
Example #9
    def run_callback(self, callback="help"):
        """This method executes the start callback (initializing
        plugins), executes the requested callback, and then executes
        the end callback.

        This is useful for scripts outside of Pyblosxom that need to
        do things inside of the Pyblosxom framework.

        If you want to run a callback from a plugin, use
        ``tools.run_callback`` instead.

        :param callback: the name of the callback to execute.

        :returns: the results of the callback.
        """
        self.initialize()

        # run the start callback
        tools.run_callback("start", {'request': self._request})

        # invoke all callbacks for the 'callback'
        handled = tools.run_callback(callback, {'request': self._request},
                                     mappingfunc=lambda x, y: x,
                                     donefunc=lambda x: x)

        # do end callback
        tools.run_callback("end", {'request': self._request})

        return handled
Example #10
def readfile(filename, request):
    '''This may never be used'''
    logger = tools.get_logger()
    logger.info("Calling readfile for %s", filename)
    entry_data = {}
    lines = open(filename).readlines()

    if len(lines) == 0:
        return {"title": "", "body": ""}

    title = lines.pop(0).strip()

    # absorb meta data
    meta = {}
    while lines and lines[0].startswith("#"):
        m = re.match(r"#([^:]+):\s*(.*)\s*", lines[0])
        if m and m.group(2):
            entry_data[m.group(1)] = m.group(2)
        elif m:
            entry_data[m.group(1)] = "1"
        lines.pop(0)

    body = parse("".join(lines), request)
    entry_data["title"] = title
    entry_data["body"] = body

    # Call the postformat callbacks
    tools.run_callback("postformat", {"request": request,
                                      "entry_data": entry_data})
    logger.info("Returning %r", entry_data)
    return entry_data
Example #11
def cb_prepare(args):
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()
    http = request.get_http()
    entry_list = data["entry_list"]
    renderer = data["renderer"]

    if entry_list and entry_list[0].has_key('mtime'):
        # FIXME - this should be generalized to a callback for updated
        # things.
        mtime = entry_list[0]['mtime']
        latest_cmtime = -1
        if 'comment_dir' in config:
            latest_filename = os.path.join(config['comment_dir'], 'LATEST.cmt')

            if os.path.exists(latest_filename):
                latest = open(latest_filename)
                latest_cmtime = cPickle.load(latest)
                latest.close()

        if latest_cmtime > mtime:
            mtime = latest_cmtime

        # Get our first file timestamp for ETag and Last Modified
        # Last-Modified: Wed, 20 Nov 2002 10:08:12 GMT
        # ETag: "2bdc4-7b5-3ddb5f0c"
        last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                      time.gmtime(mtime))
        modified_since = http.get('HTTP_IF_MODIFIED_SINCE', '')

        if ((http.get('HTTP_IF_NONE_MATCH', '') == '"%s"' % mtime)
                or (http.get('HTTP_IF_NONE_MATCH', '') == '%s' % mtime)
                or (modified_since and calendar.timegm(
                    time.strptime(modified_since, '%a, %d %b %Y %H:%M:%S GMT'))
                    >= int(mtime))):

            renderer.add_header('Status', '304 Not Modified')
            renderer.add_header('ETag', '"%s"' % mtime)
            renderer.add_header('Last-Modified', '%s' % last_modified)

            # whack the content here so that we don't then go render it
            renderer.set_content(None)

            renderer.render()

            # Log request as "We have it!"
            tools.run_callback(
                "logrequest", {
                    'filename': config.get('logfile', ''),
                    'return_code': '304',
                    'request': request
                })

            return

        renderer.add_header('ETag', '"%s"' % mtime)
        renderer.add_header('Last-Modified', '%s' % last_modified)
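Purely for illustration, the conditional-GET logic above boils down to the following exchange (header values are placeholders, not real data):

# client request headers:
#   If-None-Match: "<mtime>"
#   If-Modified-Since: Wed, 20 Nov 2002 10:08:12 GMT
#
# if the If-None-Match value matches the entry mtime, or the
# If-Modified-Since timestamp is at or after it, the plugin answers
#   Status: 304 Not Modified
#   ETag: "<mtime>"
#   Last-Modified: <last_modified>
# clears the renderer content so nothing gets re-rendered, logs the
# request with return code 304, and returns early; otherwise it only
# attaches the ETag and Last-Modified headers and lets rendering proceed.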
Example #12
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.  It takes the
    request object in and figures out which entries based on the
    default behavior that we want to show and generates a list of
    EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    if data['bl_type'] == 'dir':
        filelist = tools.walk(request,
                              data['root_datadir'],
                              int(config.get("depth", "0")))
    elif data['bl_type'] == 'file':
        filelist = [data['root_datadir']]
    else:
        filelist = []

    entrylist = [FileEntry(request, e, data["root_datadir"]) for e in filelist]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr", ""):
        tmp_pi_mo = data.get("pi_mo", "")
        datestr = "%s%s%s" % (data.get("pi_yr", ""),
                              tools.month2num.get(tmp_pi_mo, tmp_pi_mo),
                              data.get("pi_da", ""))
        entrylist = [x for x in entrylist
                     if time.strftime("%Y%m%d%H%M%S", x["timetuple"]).startswith(datestr)]

    ids = [e.get_id() for e in entrylist]
    dupes = []
    for e in entrylist:
        if (e._realfilename != e._filename) and (e._realfilename in ids):
            dupes.append(e)
    for e in dupes:
        entrylist.remove(e)

    args = {"request": request, "entry_list": entrylist}
    entrylist = tools.run_callback("sortlist",
                                   args,
                                   donefunc=lambda x: x != None,
                                   defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entrylist}    
    entrylist = tools.run_callback("truncatelist",
                                   args,
                                   donefunc=lambda x: x != None,
                                   defaultfunc=blosxom_truncate_list_handler)

    return entrylist
Example #13
def blosxom_entry_parser(filename, request):
    """Open up a ``.txt`` file and read its contents.  The first line
    becomes the title of the entry.  The other lines are the body of
    the entry.

    :param filename: a filename to extract data and metadata from
    :param request: a standard request object

    :returns: dict containing parsed data and meta data with the
              particular file (and plugin)
    """
    config = request.get_configuration()

    entry_data = {}

    f = open(filename, "r")
    lines = f.readlines()
    f.close()

    # the file has nothing in it...  so we're going to return a blank
    # entry data object.
    if len(lines) == 0:
        return {"title": "", "body": ""}

    # the first line is the title
    entry_data["title"] = lines.pop(0).strip()

    # absorb meta data lines which begin with a #
    while lines and lines[0].startswith("#"):
        meta = lines.pop(0)
        # remove the hash
        meta = meta[1:].strip()
        meta = meta.split(" ", 1)
        # if there's no value, we append a 1
        if len(meta) == 1:
            meta.append("1")
        entry_data[meta[0].strip()] = meta[1].strip()

    # call the preformat function
    args = {
        'parser': entry_data.get('parser', config.get('parser', 'plain')),
        'story': lines,
        'request': request
    }
    entry_data["body"] = tools.run_callback(
        'preformat',
        args,
        donefunc=lambda x: x is not None,
        defaultfunc=lambda x: ''.join(x['story']))

    # call the postformat callbacks
    tools.run_callback('postformat', {
        'request': request,
        'entry_data': entry_data
    })

    return entry_data
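The preformat chain above passes {'parser': ..., 'story': ..., 'request': ...} to formatting plugins and falls back to joining the raw story lines. A rough plugin-side sketch, assuming the common convention that a preformatter returns the formatted body string when it recognizes the parser name and None otherwise (the 'upper' parser name is invented for illustration):

def cb_preformat(args):
    # only claim entries whose '#parser ...' metadata (or the configured
    # default parser) names this hypothetical formatter
    if args["parser"] != "upper":
        return None  # let another preformatter, or the default joiner, run

    # 'story' is the list of body lines left over after the metadata loop
    return "".join(args["story"]).upper()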
Example #14
def cb_prepare(args):
    request = args["request"]

    data = request.getData()
    config = request.getConfiguration()
    http = request.getHttp()
    entryList = data["entry_list"]
    renderer = data["renderer"]

    if entryList and entryList[0].has_key('mtime'):
        mtime = entryList[0]['mtime']
        latest_cmtime = -1
        if config.has_key('comment_dir'):
            try: 
                import os.path
                latestFilename = os.path.join(config['comment_dir'],'LATEST.cmt')
                latest = open(latestFilename)
                import cPickle
                latest_cmtime = cPickle.load(latest)
                latest.close()
            except:
                pass
        if latest_cmtime > mtime:
            mtime = latest_cmtime

        import time

        # Get our first file timestamp for ETag and Last Modified
        # Last-Modified: Wed, 20 Nov 2002 10:08:12 GMT
        # ETag: "2bdc4-7b5-3ddb5f0c"
        lastModed = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(mtime))
        if ((http.get('HTTP_IF_NONE_MATCH','') == '"%s"' % mtime) or
            (http.get('HTTP_IF_NONE_MATCH','') == '%s' % mtime) or
            (http.get('HTTP_IF_MODIFIED_SINCE','') == lastModed)):

            renderer.addHeader('Status', '304 Not Modified',
                               'ETag', '"%s"' % mtime,
                               'Last-Modified', '%s' % lastModed)

            # whack the content here so that we don't then go render it
            renderer.setContent(None)

            renderer.render()

            from Pyblosxom import tools

            # Log request as "We have it!"
            tools.run_callback("logrequest",
                    {'filename':config.get('logfile',''),
                    'return_code': '304',
                    'request': request})

            return

        renderer.addHeader('ETag', '"%s"' % mtime,
                           'Last-Modified', '%s' % lastModed)
Example #15
def cb_prepare(args):
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()
    http = request.get_http()
    entry_list = data["entry_list"]
    renderer = data["renderer"]

    if entry_list and entry_list[0].has_key('mtime'):
        # FIXME - this should be generalized to a callback for updated
        # things.
        mtime = entry_list[0]['mtime']
        latest_cmtime = -1
        if 'comment_dir' in config:
            latest_filename = os.path.join(config['comment_dir'], 'LATEST.cmt')

            if os.path.exists(latest_filename):
                latest = open(latest_filename)
                latest_cmtime = cPickle.load(latest)
                latest.close()

        if latest_cmtime > mtime:
            mtime = latest_cmtime

        # Get our first file timestamp for ETag and Last Modified
        # Last-Modified: Wed, 20 Nov 2002 10:08:12 GMT
        # ETag: "2bdc4-7b5-3ddb5f0c"
        last_modified = time.strftime(
            '%a, %d %b %Y %H:%M:%S GMT', time.gmtime(mtime))
        modified_since = http.get('HTTP_IF_MODIFIED_SINCE', '')

        if ((http.get('HTTP_IF_NONE_MATCH', '') == '"%s"' % mtime) or
             (http.get('HTTP_IF_NONE_MATCH', '') == '%s' % mtime) or
             (modified_since and calendar.timegm(time.strptime(modified_since,'%a, %d %b %Y %H:%M:%S GMT' )) >= int(mtime))):

            renderer.add_header('Status', '304 Not Modified')
            renderer.add_header('ETag', '"%s"' % mtime)
            renderer.add_header('Last-Modified', '%s' % last_modified)

            # whack the content here so that we don't then go render it
            renderer.set_content(None)

            renderer.render()

            # Log request as "We have it!"
            tools.run_callback("logrequest",
                               {'filename': config.get('logfile', ''),
                                'return_code': '304',
                                'request': request})

            return

        renderer.add_header('ETag', '"%s"' % mtime)
        renderer.add_header('Last-Modified', '%s' % last_modified)
Example #16
def blosxom_entry_parser(filename, request):
    """Open up a ``.txt`` file and read its contents.  The first line
    becomes the title of the entry.  The other lines are the body of
    the entry.

    :param filename: a filename to extract data and metadata from
    :param request: a standard request object

    :returns: dict containing parsed data and meta data with the
              particular file (and plugin)
    """
    config = request.get_configuration()

    entry_data = {}

    f = open(filename, "r")
    lines = f.readlines()
    f.close()

    # the file has nothing in it...  so we're going to return a blank
    # entry data object.
    if len(lines) == 0:
        return {"title": "", "body": ""}

    # the first line is the title
    entry_data["title"] = lines.pop(0).strip()

    # absorb meta data lines which begin with a #
    while lines and lines[0].startswith("#"):
        meta = lines.pop(0)
        # remove the hash
        meta = meta[1:].strip()
        meta = meta.split(" ", 1)
        # if there's no value, we append a 1
        if len(meta) == 1:
            meta.append("1")
        entry_data[meta[0].strip()] = meta[1].strip()

    # call the preformat function
    args = {'parser': entry_data.get('parser', config.get('parser', 'plain')),
            'story': lines,
            'request': request}
    entry_data["body"] = tools.run_callback(
        'preformat',
        args,
        donefunc=lambda x: x != None,
        defaultfunc=lambda x: ''.join(x['story']))

    # call the postformat callbacks
    tools.run_callback('postformat',
                      {'request': request,
                       'entry_data': entry_data})

    return entry_data
Example #17
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.  It takes the
    request object in and figures out which entries based on the
    default behavior that we want to show and generates a list of
    EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    if data['bl_type'] == 'dir':
        file_list = tools.walk(request, data['root_datadir'],
                               int(config.get("depth", "0")))
    elif data['bl_type'] == 'file':
        file_list = [data['root_datadir']]
    else:
        file_list = []

    entry_list = [
        FileEntry(request, e, data["root_datadir"]) for e in file_list
    ]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr", ""):
        tmp_pi_mo = data.get("pi_mo", "")
        date_str = "%s%s%s" % (data.get(
            "pi_yr", ""), tools.month2num.get(
                tmp_pi_mo, tmp_pi_mo), data.get("pi_da", ""))
        entry_list = [
            x for x in entry_list if time.strftime(
                "%Y%m%d%H%M%S", x["timetuple"]).startswith(date_str)
        ]

    args = {"request": request, "entry_list": entry_list}
    entry_list = tools.run_callback("sortlist",
                                    args,
                                    donefunc=lambda x: x != None,
                                    defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entry_list}
    entry_list = tools.run_callback("truncatelist",
                                    args,
                                    donefunc=lambda x: x != None,
                                    defaultfunc=blosxom_truncate_list_handler)

    return entry_list
Example #18
def cb_prepare(args):
    request = args["request"]

    data = request.getData()
    config = request.getConfiguration()
    http = request.getHttp()
    entryList = data["entry_list"]
    renderer = data["renderer"]

    if entryList and entryList[0].has_key('mtime'):
        mtime = entryList[0]['mtime']
        latest_cmtime = -1
        if config.has_key('comment_dir'):
            try:
                import os.path
                latestFilename = os.path.join(config['comment_dir'],
                                              'LATEST.cmt')
                latest = open(latestFilename)
                import cPickle
                latest_cmtime = cPickle.load(latest)
                latest.close()
            except:
                pass
        if latest_cmtime > mtime:
            mtime = latest_cmtime
        import time
        # Get our first file timestamp for ETag and Last Modified
        # Last-Modified: Wed, 20 Nov 2002 10:08:12 GMT
        # ETag: "2bdc4-7b5-3ddb5f0c"
        lastModed = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                  time.gmtime(mtime))
        if ((http.get('HTTP_IF_NONE_MATCH', '') == '"%s"' % mtime)
                or (http.get('HTTP_IF_NONE_MATCH', '') == '%s' % mtime)
                or (http.get('HTTP_IF_MODIFIED_SINCE', '') == lastModed)):
            renderer.addHeader('Status', '304 Not Modified', 'ETag',
                               '"%s"' % mtime, 'Last-Modified',
                               '%s' % lastModed)
            renderer.render()

            from Pyblosxom import tools
            # Log request as "We have it!"
            tools.run_callback(
                "logrequest", {
                    'filename': config.get('logfile', ''),
                    'return_code': '304',
                    'request': request
                })

        renderer.addHeader('ETag', '"%s"' % mtime, 'Last-Modified',
                           '%s' % lastModed)
Example #19
def readfile(filename, request):
    entryData = {}
    d = open(filename).read()
    title = d.split('\n')[0]
    d = d[len(title):]
    body = parse(d)
    entryData = {'title': title,
                 'body': body}
    # Call the postformat callbacks
    tools.run_callback('postformat',
            {'request': request,
             'entry_data': entryData})
    
    return entryData
Example #20
def readfile(filename, request):
    entryData = {}
    d = open(filename).read()
    title = d.split('\n')[0]
    d = d[len(title):]
    body = parse(d)
    entryData = {'title': title, 'body': body}
    # Call the postformat callbacks
    tools.run_callback('postformat', {
        'request': request,
        'entry_data': entryData
    })

    return entryData
Example #21
def cb_handle(args):
    """
    This takes in a request and handles the request.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    config = request.getConfiguration()
    response = request.getResponse()
    

    urltrigger = config.get("xmlrpc_urltrigger", "/RPC")

    if pyhttp.get("PATH_INFO", "").startswith(urltrigger):
        try:
            content_length = int(pyhttp.get("CONTENT_LENGTH", "0"))
            maxrequest_length = config.get("xmlrpc_maxrequest_length", 10000)

            if content_length > maxrequest_length:
                raise ValueError, 'Request too large - %s bytes' % content_length

        except: 
            result = xmlrpclib.dumps(xmlrpclib.Fault(1, "%s: %s" % sys.exc_info()[:2]))
            response.addHeader('Content-Type', 'text/xml')
            response.addHeader('Content-Length', '%d' % len(result))

            response.write(result)
            response.flush()
            return 1

        # everything is cool--so we handle the xmlrpc request
        data = request.read(content_length)

        # here we call the xmlrpc_init callback passing in a dict.
        # each function that registers with this callback adds their
        # xmlrpc functions to the dict.
        args = tools.run_callback("xmlrpc_register", 
                        {"request": request, "methods": {}},
                        mappingfunc=lambda x,y:y,
                        defaultfunc=lambda x:x)
        methods = args["methods"]
        request.addData({'xmlrpc_methods': methods})

        """
        if os.environ.get("REQUEST_METHOD", "") == "GET":
            resp = []
            resp.append("Content-Type: text/plain")
            resp.append("\n")
            resp.append("-" * 40)
            resp.append("Methods Defined:")
            resp.append("-" * 40)
            for mem in methods.keys():
                resp.append("  %s -> %s" % (mem, methods[mem]))
            sys.stdout.write("\n".join(resp))
            return 1
        """

        x = XMLRPCHandler(request, data).process()

        # return 1 indicating we've handled the request
        return 1
Example #22
def category_to_tags(command, argv):
    """Goes through all entries and converts the category to tags
    metadata.

    It adds the tags line as the second line.

    It maintains the mtime for the file.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom import pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback(
        "entryparser", {"txt": pyblosxom.blosxom_entry_parser},
        mappingfunc=lambda x, y: y,
        defaultfunc=lambda x: x)

    req = pyblosxom.Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)

    if not datadir.endswith(os.sep):
        datadir = datadir + os.sep

    for mem in filelist:
        print "working on %s..." % mem

        category = os.path.dirname(mem)[len(datadir):]
        tags = category.split(os.sep)
        print "   adding tags %s" % tags
        tags = "#tags %s\n" % (sep.join(tags))

        atime, mtime = os.stat(mem)[7:9]

        fp = open(mem, "r")
        data = fp.readlines()
        fp.close()

        data.insert(1, tags)

        fp = open(mem, "w")
        fp.write("".join(data))
        fp.close()

        os.utime(mem, (atime, mtime))

    return 0
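To make the insertion concrete: for a hypothetical entry stored under datadir/dev/python/, the category path becomes the tag list and a '#tags' line is spliced in as the second line, which is exactly the metadata format the entry parsers above absorb.

# hypothetical file contents before the rewrite
data = ["My post title\n", "First line of the body.\n"]

tags = "#tags %s\n" % ",".join(["dev", "python"])
data.insert(1, tags)

# data is now:
#   ['My post title\n', '#tags dev,python\n', 'First line of the body.\n']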
Example #23
def category_to_tags(command, argv):
    """Goes through all entries and converts the category to tags
    metadata.

    It adds the tags line as the second line.

    It maintains the mtime for the file.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")

    from Pyblosxom.pyblosxom import Request
    from Pyblosxom.blosxom import blosxom_entry_parser
    from Pyblosxom import tools

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback("entryparser",
                                            {"txt": blosxom_entry_parser},
                                            mappingfunc=lambda x, y: y,
                                            defaultfunc=lambda x: x)

    req = Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)

    if not datadir.endswith(os.sep):
        datadir = datadir + os.sep

    for mem in filelist:
        print "working on %s..." % mem

        category = os.path.dirname(mem)[len(datadir):]
        tags = category.split(os.sep)
        print "   adding tags %s" % tags
        tags = "#tags %s\n" % (sep.join(tags))

        atime, mtime = os.stat(mem)[7:9]

        fp = open(mem, "r")
        data = fp.readlines()
        fp.close()

        data.insert(1, tags)

        fp = open(mem, "w")
        fp.write("".join(data))
        fp.close()

        os.utime(mem, (atime, mtime))

    return 0
Example #24
def parse(filename, request):
    # FIXME: We're assuming the file is UTF-8.
    lines = [line.decode('utf8') for line in open(filename, 'r').readlines()]

    # Call the preformat function, this should return us an HTML string for
    # the body of the entry.
    args = {'parser': 'simplemarkdown', 'story': lines, 'request': request}
    body = tools.run_callback(
        'preformat', args, donefunc=lambda x: x is not None,
        defaultfunc=lambda x: ''.join(x['story']))

    entry_data = {'body': body}  # No title!

    # Call the postformat callbacks.
    tools.run_callback(
        'postformat', {'request': request, 'entry_data': entry_data})

    return entry_data
Example #25
def entry_parser(contents, request):
    """ Run the appropriate entry parser for this file. Return the data dict
    generated by the entry parser.
    """
    # this *should* be just these three lines. however,
    # pyblosxom.blosxom_entry_parser only reads from a file, *not* from a
    # string. so, until that's fixed, we have to duplicate its code below.
    #   fn, ext = os.path.splitext(full_path)
    #   entry_parser = data['extensions'][ext[1:]]
    #   parsed = entry_parser(full_path, request)

    config = request.getConfiguration()
    entryData = {}

    lines = [l + '\n' for l in contents.split('\n')]
    title = lines.pop(0).strip()
    entryData['title'] = title

    # absorb meta data lines which begin with a #
    while lines and lines[0].startswith('#'):
        meta = lines.pop(0)
        meta = meta[1:].strip()  # remove the hash
        meta = meta.split(' ', 1)
        entryData[meta[0].strip()] = meta[1].strip()

    # Call the preformat function
    args = {
        'parser': entryData.get('parser', config.get('parser', 'plain')),
        'story': lines,
        'request': request
    }
    entryData['body'] = tools.run_callback(
        'preformat',
        args,
        donefunc=lambda x: x != None,
        defaultfunc=lambda x: ''.join(x['story']))

    # Call the postformat callbacks
    tools.run_callback('postformat', {
        'request': request,
        'entry_data': entryData
    })

    return entryData
Example #26
    def initialize(self):
        """The initialize step further initializes the Request by
        setting additional information in the ``data`` dict,
        registering plugins, and entryparsers.
        """
        data = self._request.get_data()
        pyhttp = self._request.get_http()
        config = self._request.get_configuration()

        # initialize the locale, if wanted (will silently fail if locale
        # is not available)
        if config.get('locale', None):
            try:
                locale.setlocale(locale.LC_ALL, config['locale'])
            except locale.Error:
                # invalid locale
                pass

        # initialize the tools module
        tools.initialize(config)

        data["pyblosxom_version"] = __version__
        data['pi_bl'] = ''

        # if the user specifies base_url in config, we use that.
        # otherwise we compose it from SCRIPT_NAME in the environment
        # or we leave it blank.
        if not "base_url" in config:
            if pyhttp.has_key('SCRIPT_NAME'):
                # allow http and https
                config['base_url'] = '%s://%s%s' % \
                                     (pyhttp['wsgi.url_scheme'],
                                      pyhttp['HTTP_HOST'],
                                      pyhttp['SCRIPT_NAME'])
            else:
                config["base_url"] = ""

        # take off the trailing slash for base_url
        if config['base_url'].endswith("/"):
            config['base_url'] = config['base_url'][:-1]

        datadir = config["datadir"]
        if datadir.endswith("/") or datadir.endswith("\\"):
            datadir = datadir[:-1]
            config['datadir'] = datadir

        # import and initialize plugins
        plugin_utils.initialize_plugins(config.get("plugin_dirs", []),
                                        config.get("load_plugins", None))

        # entryparser callback is run here first to allow other
        # plugins to register what file extensions can be used
        data['extensions'] = tools.run_callback("entryparser",
                                        {'txt': blosxom_entry_parser},
                                        mappingfunc=lambda x,y:y,
                                        defaultfunc=lambda x:x)
Example #27
    def initialize(self):
        """The initialize step further initializes the Request by
        setting additional information in the ``data`` dict,
        registering plugins, and entryparsers.
        """
        data = self._request.get_data()
        py_http = self._request.get_http()
        config = self._request.get_configuration()

        # initialize the locale, if wanted (will silently fail if locale
        # is not available)
        if config.get('locale', None):
            try:
                locale.setlocale(locale.LC_ALL, config['locale'])
            except locale.Error:
                # invalid locale
                pass

        # initialize the tools module
        tools.initialize(config)

        data["pyblosxom_version"] = __version__
        data['pi_bl'] = ''

        # if the user specifies base_url in config, we use that.
        # otherwise we compose it from SCRIPT_NAME in the environment
        # or we leave it blank.
        if not "base_url" in config:
            if py_http.has_key('SCRIPT_NAME'):
                # allow http and https
                config['base_url'] = '%s://%s%s' % \
                                     (py_http['wsgi.url_scheme'],
                                      py_http['HTTP_HOST'],
                                      py_http['SCRIPT_NAME'])
            else:
                config["base_url"] = ""

        # take off the trailing slash for base_url
        if config['base_url'].endswith("/"):
            config['base_url'] = config['base_url'][:-1]

        data_dir = config["datadir"]
        if data_dir.endswith("/") or data_dir.endswith("\\"):
            data_dir = data_dir[:-1]
            config['datadir'] = data_dir

        # import and initialize plugins
        plugin_utils.initialize_plugins(config.get("plugin_dirs", []),
                                        config.get("load_plugins", None))

        # entryparser callback is run here first to allow other
        # plugins to register what file extensions can be used
        data['extensions'] = tools.run_callback("entryparser",
                                                {'txt': blosxom_entry_parser},
                                                mappingfunc=lambda x, y: y,
                                                defaultfunc=lambda x: x)
Example #28
def readfile(filename, request):
    entryData = {}
    lines = open(filename).readlines()

    if len(lines) == 0:
        return {"title": "", "body": ""}

    body, title = parse(''.join(lines), request)
    if not title:
        import os.path
        title = os.path.basename(filename).replace('.bib', '')
    entryData["title"] = title
    entryData["body"] = body

    tools.run_callback('postformat', {
        'request': request,
        'entry_data': entryData
    })
    return entryData
Example #29
def blogger_getRecentPosts(request,
                           appkey,
                           blogid,
                           username,
                           password,
                           numberOfPosts=5):
    """
    Get recent posts from a blog tree
    """
    authenticate(request, username, password)
    config = request.getConfiguration()
    data = request.getData()
    from Pyblosxom.entries.fileentry import FileEntry
    from Pyblosxom import pyblosxom

    exts = tools.run_callback("entryparser",
                              {'txt': pyblosxom.blosxom_entry_parser},
                              mappingfunc=lambda x, y: y,
                              defaultfunc=lambda x: x)

    data['extensions'] = exts

    result = []
    dataList = []
    filelist = tools.Walk(request,
                          os.path.join(config['datadir'], blogid[1:]),
                          pattern=re.compile(r'.*\.(' + '|'.join(exts.keys()) +
                                             ')-?$'),
                          recurse=1)
    for ourfile in filelist:
        entry = FileEntry(request, ourfile, config['datadir'])
        dataList.append((entry._mtime, entry))

    # this sorts entries by mtime in reverse order.  entries that have
    # no mtime get sorted to the top.
    dataList.sort()
    dataList.reverse()
    dataList = [x[1] for x in dataList]

    count = 1
    for entry in dataList:
        result.append({
            'dateCreated':
            xmlrpclib.DateTime(entry['mtime']),
            'userid':
            '01',
            'postid':
            entry['filename'].replace(config['datadir'], ''),
            'content':
            open(entry['filename']).read()
        })
        if count >= int(numberOfPosts):
            break
        count += 1
    return result
Example #30
def readfile(filename, request):
    """
    Reads a file and passes it to L{parse} to format in moinmoin wiki

    @param filename: the file in question
    @param request: The request object
    @type filename: string
    @type request: L{Pyblosxom.pyblosxom.Request} object
    @returns: Data of the entry
    @rtype: dict
    """
    entryData = {}
    d = open(filename).read()
    entryData['title'] = d.split('\n')[0]
    d = d[len(entryData['title']):] 
    entryData['body'] = parse(d)
    # Call the postformat callbacks
    tools.run_callback('postformat',
            {'request': request,
             'entry_data': entryData})
    return entryData
Example #31
def entry_parser(contents, request):
  """ Run the appropriate entry parser for this file. Return the data dict
  generated by the entry parser.
  """
  # this *should* be just these three lines. however,
  # pyblosxom.blosxom_entry_parser only reads from a file, *not* from a
  # string. so, until that's fixed, we have to duplicate its code below.
#   fn, ext = os.path.splitext(full_path)
#   entry_parser = data['extensions'][ext[1:]]
#   parsed = entry_parser(full_path, request)

  config = request.getConfiguration()
  entryData = {}
  
  lines = [l + '\n' for l in contents.split('\n')]
  title = lines.pop(0).strip()
  entryData['title'] = title

  # absorb meta data lines which begin with a #
  while lines and lines[0].startswith('#'):
      meta = lines.pop(0)
      meta = meta[1:].strip()     # remove the hash
      meta = meta.split(' ', 1)
      entryData[meta[0].strip()] = meta[1].strip()

  # Call the preformat function
  args = {'parser': entryData.get('parser', config.get('parser', 'plain')),
          'story': lines,
          'request': request}
  entryData['body'] = tools.run_callback('preformat', 
                                         args,
                                         donefunc = lambda x:x != None,
                                         defaultfunc = lambda x: ''.join(x['story']))

  # Call the postformat callbacks
  tools.run_callback('postformat',
                    {'request': request,
                     'entry_data': entryData})
      
  return entryData
Example #32
def buildtags(command, argv):
    """Command for building the tags index."""
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom.pyblosxom import Pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    # build a Pyblosxom object, initialize it, and run the start
    # callback.  this gives entry parsing related plugins a chance to
    # get their stuff together so that they work correctly.
    p = Pyblosxom(config.py, {})
    p.initialize()
    req = p.get_request()
    tools.run_callback("start", {"request": req})

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
Example #33
def buildtags(command, argv):
    """Command for building the tags index."""
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom.pyblosxom import blosxom_entry_parser, Pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    # build a Pyblosxom object, initialize it, and run the start
    # callback.  this gives entry parsing related plugins a chance to
    # get their stuff together so that they work correctly.
    p = Pyblosxom(config.py, {})
    p.initialize()
    req = p.get_request()
    tools.run_callback("start", {"request": req})

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
Example #34
    def run(self, static=False):
        """This is the main loop for Pyblosxom.  This method will run
        the handle callback to allow registered handlers to handle the
        request.  If nothing handles the request, then we use the
        ``default_blosxom_handler``.

        :param static: True if Pyblosxom should execute in "static rendering
                       mode" and False otherwise.
        """
        self.initialize()

        # buffer the input stream in a StringIO instance if dynamic
        # rendering is used.  This is done to have a known/consistent
        # way of accessing incoming data.
        if static == False:
            self.get_request().buffer_input_stream()

        # run the start callback
        tools.run_callback("start", {'request': self._request})

        # allow anyone else to handle the request at this point
        handled = tools.run_callback("handle",
                        {'request': self._request},
                        mappingfunc=lambda x,y:x,
                        donefunc=lambda x:x)

        if not handled == 1:
            blosxom_handler(self._request)

        # do end callback
        tools.run_callback("end", {'request': self._request})

        # we're done, clean up.
        # only call this if we're not in static rendering mode.
        if static == False:
            self.cleanup()
Example #35
    def _run_callback(self, chain, input):
        """
        Makes calling blosxom callbacks a bit easier since they all have the
        same mechanics.  This function merely calls run_callback with
        the arguments given and a mappingfunc.

        The mappingfunc copies the "template" value from the output to the
        input for the next function.

        Refer to run_callback for more details.
        """
        input.update({"renderer": self})
        return tools.run_callback(chain, input,
                                  mappingfunc=lambda x, y: x,
                                  defaultfunc=lambda x: x)
Example #36
def cb_filelist(args):
    from Pyblosxom.pyblosxom import blosxom_truncate_list_handler
    from Pyblosxom import tools

    # handles /trigger/tag to show all the entries tagged that
    # way
    req = args["request"]

    pyhttp = req.get_http()
    data = req.get_data()
    config = req.get_configuration()

    trigger = "/" + config.get("tags_trigger", "tag")
    if not pyhttp["PATH_INFO"].startswith(trigger):
        return

    datadir = config["datadir"]
    tagsfile = get_tagsfile(config)
    tagsdata = loadfile(tagsfile)

    tag = pyhttp["PATH_INFO"][len(trigger) + 1:]
    filelist = tagsdata.get(tag, [])
    if not filelist:
        tag, ext = os.path.splitext(tag)
        filelist = tagsdata.get(tag, [])
        if filelist:
            data["flavour"] = ext[1:]

    from Pyblosxom.entries import fileentry
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    # sort the list by mtime
    entrylist = [(e._mtime, e) for e in entrylist]
    entrylist.sort()
    entrylist.reverse()
    entrylist = [e[1] for e in entrylist]

    data["truncate"] = config.get("truncate_tags", True)

    args = {"request": req, "entry_list": entrylist}
    entrylist = tools.run_callback("truncatelist",
                                   args,
                                   donefunc=lambda x: x != None,
                                   defaultfunc=blosxom_truncate_list_handler)

    return entrylist
Example #37
    def _run_callback(self, chain, input):
        """
        Makes calling blosxom callbacks a bit easier since they all
        have the same mechanics.  This function merely calls
        run_callback with the arguments given and a mappingfunc.

        The mappingfunc copies the ``template`` value from the output to
        the input for the next function.

        Refer to run_callback for more details.
        """
        input.update({"renderer": self})
        input.update({"request": self._request})

        return tools.run_callback(chain, input,
                                  mappingfunc=lambda x,y: x,
                                  defaultfunc=lambda x:x)
Example #38
def cb_filelist(args):
    from Pyblosxom.blosxom import blosxom_truncate_list_handler
    from Pyblosxom import tools

    # handles /trigger/tag to show all the entries tagged that
    # way
    req = args["request"]

    pyhttp = req.get_http()
    data = req.get_data()
    config = req.get_configuration()

    trigger = "/" + config.get("tags_trigger", "tag")
    if not pyhttp["PATH_INFO"].startswith(trigger):
        return

    datadir = config["datadir"]
    tagsfile = get_tagsfile(config)
    tagsdata = loadfile(tagsfile)

    tag = pyhttp["PATH_INFO"][len(trigger) + 1:]
    filelist = tagsdata.get(tag, [])
    if not filelist:
        tag, ext = os.path.splitext(tag)
        filelist = tagsdata.get(tag, [])
        if filelist:
            data["flavour"] = ext[1:]

    from Pyblosxom.entries import fileentry
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    # sort the list by mtime
    entrylist = [(e._mtime, e) for e in entrylist]
    entrylist.sort()
    entrylist.reverse()
    entrylist = [e[1] for e in entrylist]

    data["truncate"] = config.get("truncate_tags", True)

    args = {"request": req, "entry_list": entrylist}
    entrylist = tools.run_callback("truncatelist",
                                   args,
                                   donefunc=lambda x: x != None,
                                   defaultfunc=blosxom_truncate_list_handler)

    return entrylist
Example #39
def blogger_getRecentPosts(request, appkey, blogid, username, password,
        numberOfPosts=5):
    """
    Get recent posts from a blog tree
    """
    authenticate(request, username, password)
    config = request.getConfiguration()
    data = request.getData()
    from Pyblosxom.entries.fileentry import FileEntry
    from Pyblosxom import pyblosxom

    exts = tools.run_callback("entryparser",
                {'txt': pyblosxom.blosxom_entry_parser},
                mappingfunc=lambda x,y:y,
                defaultfunc=lambda x:x)

    data['extensions'] = exts
    
    result = []
    dataList = []
    filelist = tools.Walk(request, os.path.join(config['datadir'], blogid[1:]), 
            pattern = re.compile(r'.*\.(' +
            '|'.join(exts.keys()) + ')-?$'), 
            recurse = 1)
    for ourfile in filelist:
        entry = FileEntry(request, ourfile, config['datadir'])
        dataList.append((entry._mtime, entry))

    # this sorts entries by mtime in reverse order.  entries that have
    # no mtime get sorted to the top.
    dataList.sort()
    dataList.reverse()
    dataList = [x[1] for x in dataList]
    
    count = 1
    for entry in dataList:
        result.append({'dateCreated' : xmlrpclib.DateTime(entry['mtime']),
                       'userid' : '01',
                       'postid' : entry['filename'].replace(
                                  config['datadir'],''),
                       'content' : open(entry['filename']).read()})
        if count >= int(numberOfPosts):
            break
        count += 1
    return result
Example #40
def buildtags(command, argv):
    """Builds the tags index.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)

    from Pyblosxom import pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback(
        "entryparser", {"txt": pyblosxom.blosxom_entry_parser},
        mappingfunc=lambda x, y: y,
        defaultfunc=lambda x: x)

    req = pyblosxom.Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
Example #41
def buildtags(command, argv):
    """Builds the tags index.
    """
    import config

    datadir = config.py.get("datadir")
    if not datadir:
        raise ValueError("config.py has no datadir property.")

    sep = config.py.get("tags_separator", ",")
    tagsfile = get_tagsfile(config.py)
    
    from Pyblosxom import pyblosxom
    from Pyblosxom import tools
    from Pyblosxom.entries import fileentry

    data = {}

    # register entryparsers so that we parse all possible file types.
    data["extensions"] = tools.run_callback("entryparser",
                                            {"txt": pyblosxom.blosxom_entry_parser},
                                            mappingfunc=lambda x, y:y,
                                            defaultfunc=lambda x: x)

    req = pyblosxom.Request(config.py, {}, data)

    # grab all the entries in the datadir
    filelist = tools.walk(req, datadir)
    entrylist = [fileentry.FileEntry(req, e, datadir) for e in filelist]

    tags_to_files = {}
    for mem in entrylist:
        tagsline = mem["tags"]
        if not tagsline:
            continue
        tagsline = [t.strip() for t in tagsline.split(sep)]
        for t in tagsline:
            tags_to_files.setdefault(t, []).append(mem["filename"])

    savefile(tagsfile, tags_to_files)
    return 0
Example #42
def get_handlers():
    try:
        from config import py as cfg
        plugin_utils.initialize_plugins(cfg.get("plugin_dirs", []),
                                        cfg.get("load_plugins", None))
    except ImportError:
        pass

    handlers_dict = dict([(v[0], (v[1], v[2])) for v in DEFAULT_HANDLERS])
    handlers_dict = run_callback("commandline", handlers_dict,
                                 mappingfunc=lambda x, y: y,
                                 defaultfunc=lambda x: x)

    # test the handlers, drop any that aren't the right return type,
    # and print a warning.
    handlers = []
    for k, v in handlers_dict.items():
        if not len(v) == 2 or not callable(v[0]) or not isinstance(v[1], str):
            print "Plugin returned '%s' for commandline." % ((k, v),)
            continue
        handlers.append((k, v[0], v[1]))

    return handlers
Example #43
def get_handlers():
    try:
        from config import py as cfg
        plugin_utils.initialize_plugins(cfg.get("plugin_dirs", []),
                                        cfg.get("load_plugins", None))
    except ImportError:
        pass

    handlers_dict = dict([(v[0], (v[1], v[2])) for v in DEFAULT_HANDLERS])
    handlers_dict = run_callback("commandline",
                                 handlers_dict,
                                 mappingfunc=lambda x, y: y,
                                 defaultfunc=lambda x: x)

    # test the handlers, drop any that aren't the right return type,
    # and print a warning.
    handlers = []
    for k, v in handlers_dict.items():
        if not len(v) == 2 or not callable(v[0]) or not isinstance(v[1], str):
            print "Plugin returned '%s' for commandline." % ((k, v), )
            continue
        handlers.append((k, v[0], v[1]))

    return handlers
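
As the validation loop above shows, every entry in the commandline handlers dict must be a (callable, help string) pair keyed by command name. A plugin could hook in roughly like this (the command name and handler body are made up for illustration):

def do_reindex(command, argv):
    """Hypothetical command handler: gets the command name and the
    remaining command line arguments, returns an exit code."""
    print "running %s with %r" % (command, argv)
    return 0


def cb_commandline(args):
    # args is the handlers dict itself; add a (callable, description)
    # tuple so the entry survives the type check in get_handlers().
    args["reindex"] = (do_reindex, "rebuilds a hypothetical index")
    return args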
Example #44
0
def cb_handle(args):
    """

    @param args: a dict of plugin arguments
    @type args: dict
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    urltrigger = config.get('trackback_urltrigger','/trackback')

    logger = tools.getLogger()

    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.getResponse()
        response.addHeader("Content-type", "text/xml")

        form = request.getForm()

        message = "A trackback must have at least a URL field (see http://www.sixapart.com/pronet/docs/trackback_spec )"

        if form.has_key("url"):
            from comments import decode_form
            decode_form(form, config['blog_encoding'])
            import time
            cdict = {'title': form.getvalue('title', ''),
                     'author': 'Trackback from %s' % form.getvalue('blog_name', ''),
                     'pubDate': str(time.time()),
                     'link': form['url'].value,
                     'source': form.getvalue('blog_name', ''),
                     'description': form.getvalue('excerpt', '')}

            argdict = { "request": request, "comment": cdict }
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x:x != 0)
            if ((isinstance(reject, tuple) or isinstance(reject, list))
                and len(reject) == 2):
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1

            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.pyblosxom import Request
            from Pyblosxom.pyblosxom import PyBlosxom

            datadir = config['datadir']

            from comments import writeComment    
            try:
                import os
                pi = path_info.replace(urltrigger,'')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir )
                data = {}
                data['entry_list'] = [ entry ]
                writeComment(request, config, data, cdict, config['blog_encoding'])
                print >> response, tb_good_response
            except OSError:
                message = 'URI '+path_info+" doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message

        else:
            logger.error(message)
            print >> response, tb_bad_response % message

        # no further handling is needed
        return 1
    else:
        return 0
Example #45
0
def cb_prepare(args):
    """
    Handle comment related HTTP POST's.
    
    @param request: pyblosxom request object
    @type request: a Pyblosxom request object
    """
    request = args["request"]
    form = request.get_http()['form']
    config = request.get_configuration()
    data = request.get_data()
    pyhttp = request.get_http()

    # first we check to see if we're going to print out comments
    # the default is not to show comments
    data['display_comment_default'] = 0        

    # check to see if they have "showcomments=yes" in the querystring
    qstr = pyhttp.get('QUERY_STRING', None)
    if qstr != None:
        parsed_qs = cgi.parse_qs(qstr)
        if 'showcomments' in parsed_qs:
            if parsed_qs['showcomments'][0] == 'yes':
                data['display_comment_default'] = 1

    # check to see if the bl_type is "file"
    if "bl_type" in data and data["bl_type"] == "file":
        data["bl_type_file"] = "yes"
        data['display_comment_default'] = 1
 
    # second, we check to see if they're posting a comment and we
    # need to write the comment to disk.
    posting = (('ajax' in form and form['ajax'].value == 'post') or
               not "preview" in form)
    if (("title" in form and "author" in form
         and "body" in form and posting)):

        encoding = config.get('blog_encoding', 'utf-8')
        decode_form(form, encoding)

        body = form['body'].value
        author = form['author'].value
        title = form['title'].value
        url = ('url' in form and [form['url'].value] or [''])[0]

        # sanitize incoming data
        body = sanitize(body)
        author = sanitize(author)
        title = sanitize(title)

        # it doesn't make sense to add nofollow to link here, but we should
        # escape it. If you don't like the link escaping, I'm not attached 
        # to it.
        cmt_time = time.time()
        w3cdate = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(cmt_time))
        date = time.strftime('%a %d %b %Y', time.gmtime(cmt_time))
        cdict = {'title': title,
                 'author': author,
                 'pubDate': str(cmt_time),
                 'w3cdate': w3cdate,
                 'date': date,
                 'link': massage_link(url),
                 'source': '',
                 'description': add_dont_follow(body, config)}

        keys = form.keys()
        keys = [k for k in keys
                if k not in ["title", "url", "author", "body", "description"]]
        for k in keys:
            cdict[k] = form[k].value

        if 'email' in form:
            cdict['email'] = form['email'].value

        cdict['ipaddress'] = pyhttp.get('REMOTE_ADDR', '')

        # record the comment's timestamp, so we can extract it and send it
        # back alone, without the rest of the page, if the request was ajax.
        data['cmt_time'] = float(cdict['pubDate'])

        argdict = {"request": request, "comment": cdict}
        reject = tools.run_callback("comment_reject",
                                    argdict,
                                    donefunc=lambda x:x != 0)
        if (((isinstance(reject, tuple) or isinstance(reject, list))
             and len(reject) == 2)):
            reject_code, reject_message = reject
        else:
            reject_code, reject_message = reject, "Comment rejected."
        if reject_code == 1:
            data["comment_message"] = reject_message
            data["rejected"] = True
        else:
            data["comment_message"] = write_comment(request, config, data, \
                                                   cdict, encoding)
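
The reject handling above accepts either a bare return code or a (code, message) pair from the comment_reject chain, so an anti-spam plugin only has to return one of those shapes. A minimal sketch (the link-counting heuristic is just an example):

def cb_comment_reject(args):
    """Reject comments whose body contains too many links.

    Returning 0 lets the comment through; returning 1, or a
    (1, message) tuple, rejects it with an optional custom message.
    """
    comment = args["comment"]
    body = comment.get("description", "")
    if body.lower().count("http://") > 3:
        return (1, "Comment rejected: too many links.")
    return 0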
Example #46
0
def blosxom_handler(request):
    """This is the default blosxom handler.

    It calls the renderer callback to get a renderer.  If there is no
    renderer, it uses the blosxom renderer.

    It calls the pathinfo callback to process the path_info http
    variable.

    It calls the filelist callback to build a list of entries to
    display.

    It calls the prepare callback to do any additional preparation
    before rendering the entries.

    Then it tells the renderer to render the entries.

    :param request: the request object.
    """
    config = request.get_configuration()
    data = request.get_data()

    # go through the renderer callback to see if anyone else wants to
    # render.  this renderer gets stored in the data dict for
    # downstream processing.
    rend = tools.run_callback('renderer', {'request': request},
                              donefunc=lambda x: x is not None,
                              defaultfunc=lambda x: None)

    if not rend:
        # get the renderer we want to use
        rend = config.get("renderer", "blosxom")

        # import the renderer
        rend = tools.importname("Pyblosxom.renderers", rend)

        # get the renderer object
        rend = rend.Renderer(request, config.get("stdoutput", sys.stdout))

    data['renderer'] = rend

    # generate the timezone variable
    data["timezone"] = time.tzname[time.localtime()[8]]

    # process the path info to determine what kind of blog entry(ies)
    # this is
    tools.run_callback("pathinfo", {"request": request},
                       donefunc=lambda x: x is not None,
                       defaultfunc=blosxom_process_path_info)

    # call the filelist callback to generate a list of entries
    data["entry_list"] = tools.run_callback(
        "filelist", {"request": request},
        donefunc=lambda x: x is not None,
        defaultfunc=blosxom_file_list_handler)

    # figure out the blog-level mtime which is the mtime of the head
    # of the entry_list
    entry_list = data["entry_list"]
    if isinstance(entry_list, list) and len(entry_list) > 0:
        mtime = entry_list[0].get("mtime", time.time())
    else:
        mtime = time.time()
    mtime_tuple = time.localtime(mtime)
    mtime_gmtuple = time.gmtime(mtime)

    data["latest_date"] = time.strftime('%a, %d %b %Y', mtime_tuple)

    # Make sure we get proper 'English' dates when using standards
    loc = locale.getlocale(locale.LC_ALL)
    locale.setlocale(locale.LC_ALL, 'C')

    data["latest_w3cdate"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', mtime_gmtuple)
    data['latest_rfc822date'] = time.strftime('%a, %d %b %Y %H:%M GMT',
                                              mtime_gmtuple)

    # set the locale back
    locale.setlocale(locale.LC_ALL, loc)

    # we pass the request with the entry_list through the prepare
    # callback giving everyone a chance to transform the data.  the
    # request is modified in place.
    tools.run_callback("prepare", {"request": request})

    # now we pass the entry_list through the renderer
    entry_list = data["entry_list"]
    renderer = data['renderer']

    if renderer and not renderer.rendered:
        if entry_list:
            renderer.set_content(entry_list)
            # Log it as success
            tools.run_callback(
                "logrequest", {
                    'filename': config.get('logfile', ''),
                    'return_code': '200',
                    'request': request
                })
        else:
            renderer.add_header('Status', '404 Not Found')
            renderer.set_content(
                {'title': 'The page you are looking for is not available',
                 'body': 'Somehow I cannot find the page you want. ' +
                         'Go Back to <a href="%s">%s</a>?' %
                         (config["base_url"], config["blog_title"])})
            # Log it as failure
            tools.run_callback(
                "logrequest", {
                    'filename': config.get('logfile', ''),
                    'return_code': '404',
                    'request': request
                })
        renderer.render()

    elif not renderer:
        output = config.get('stdoutput', sys.stdout)
        output.write("Content-Type: text/plain\n\n" +
                     "There is something wrong with your setup.\n" +
                     "Check your config files and verify that your " +
                     "configuration is correct.\n")

    cache = tools.get_cache(request)
    if cache:
        cache.close()
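
Because the renderer chain stops at the first non-None return value, a plugin can take over output completely by returning its own renderer from cb_renderer. The sketch below only assumes the small interface the handler above actually uses (rendered, add_header(), set_content() and render()); it is not the real Pyblosxom renderer base class:

import sys


class PlainTextRenderer(object):
    """Tiny illustrative renderer: dumps the content as plain text."""

    def __init__(self, request, out=sys.stdout):
        self._request = request
        self._out = out
        self._headers = [("Content-Type", "text/plain")]
        self._content = None
        self.rendered = False

    def add_header(self, key, value):
        self._headers.append((key, value))

    def set_content(self, content):
        self._content = content

    def render(self):
        for key, value in self._headers:
            self._out.write("%s: %s\n" % (key, value))
        self._out.write("\n%r\n" % (self._content,))
        self.rendered = True


def cb_renderer(args):
    # returning a non-None value here stops the renderer chain
    return PlainTextRenderer(args["request"])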
Example #47
0
def cb_handle(args):
    request = args["request"]
    pyhttp = request.get_http()
    config = request.get_configuration()

    urltrigger = config.get("trackback_urltrigger", "/trackback")

    logger = tools.get_logger()

    path_info = pyhttp["PATH_INFO"]
    if path_info.startswith(urltrigger):
        response = request.get_response()
        response.add_header("Content-type", "text/xml")

        form = request.get_form()

        message = ("A trackback must have at least a URL field (see "
                   "http://www.sixapart.com/pronet/docs/trackback_spec)")

        if "url" in form:
            from comments import decode_form

            encoding = config.get("blog_encoding", "iso-8859-1")
            decode_form(form, encoding)
            import time

            cdict = {
                "title": form.getvalue("title", ""),
                "author": form.getvalue("blog_name", ""),
                "pubDate": str(time.time()),
                "link": form["url"].value,
                "source": form.getvalue("blog_name", ""),
                "description": form.getvalue("excerpt", ""),
                "ipaddress": pyhttp.get("REMOTE_ADDR", ""),
                "type": "trackback",
            }
            argdict = {"request": request, "comment": cdict}
            reject = tools.run_callback("trackback_reject", argdict, donefunc=lambda x: x != 0)
            if isinstance(reject, (tuple, list)) and len(reject) == 2:
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."

            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1

            from Pyblosxom.entries.fileentry import FileEntry

            datadir = config["datadir"]

            from comments import writeComment

            try:
                import os

                pi = path_info.replace(urltrigger, "")
                path = os.path.join(datadir, pi[1:])
                data = request.get_data()
                ext = tools.what_ext(data["extensions"].keys(), path)
                entry = FileEntry(request, "%s.%s" % (path, ext), datadir)
                data = {}
                data["entry_list"] = [entry]
                # Format Author
                cdict["author"] = "Trackback from %s" % form.getvalue("blog_name", "")
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            except OSError:
                message = "URI " + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message

        else:
            logger.error(message)
            print >> response, tb_bad_response % message

        # no further handling is needed
        return 1
    return 0
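
The tb_good_response and tb_bad_response strings printed above are module-level templates that are not included in the listing. Definitions along these lines would match the TrackBack response format referenced in the code, though the plugin's exact strings may differ:

tb_good_response = """<?xml version="1.0" encoding="utf-8"?>
<response>
<error>0</error>
</response>"""

# the %s placeholder is filled with the rejection or error message
tb_bad_response = """<?xml version="1.0" encoding="utf-8"?>
<response>
<error>1</error>
<message>%s</message>
</response>"""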
Example #48
0
def pingback(request, source, target):
    logger = tools.getLogger()
    logger.info("pingback started")
    source_file = urllib.urlopen(source.split('#')[0])
    if source_file.headers.get('error', '') == '404':
        raise Fault(0x0010, "Target %s not exists" % target)
    source_page = parser()
    source_page.feed(source_file.read())
    source_file.close()

    if source_page.title == "": source_page.title = source
    
    if target in source_page.hrefs:
        target_entry = fileFor(request, target)

        body = ''
        try:
            from rssfinder import getFeeds
            from rssparser import parse

            baseurl = source.split("#")[0]
            for feed in getFeeds(baseurl):
                for item in parse(feed)['items']:
                    if item['link'] == source:
                        if 'title' in item:
                            source_page.title = item['title']
                        if 'content_encoded' in item:
                            body = item['content_encoded'].strip()
                        if 'description' in item:
                            body = item['description'].strip() or body
                        body = re.compile('<.*?>', re.S).sub('', body)
                        body = re.sub(r'\s+', ' ', body)
                        body = body[:body.rfind(' ', 0, 250)][:250] + " ...<br />"
        except:
            pass

        cmt = {'title': source_page.title,
               'author': 'Pingback from %s' % source_page.title,
               'pubDate': str(time.time()),
               'link': source,
               'source': '',
               'description': body}
        
        # run anti-spam plugins
        argdict = { "request": request, "comment": cmt }
        reject = tools.run_callback("trackback_reject",
                                    argdict,
                                    donefunc=lambda x:x != 0)
        if ((isinstance(reject, tuple) or isinstance(reject, list))
            and len(reject) == 2):
            reject_code, reject_message = reject
        else:
            reject_code, reject_message = reject, "Pingback rejected."
        if reject_code == 1:
            raise Fault(0x0031, reject_message)

        from comments import writeComment
        config = request.getConfiguration()
        data = request.getData()
        data['entry_list'] = [ target_entry ]

        # TODO: Check if comment from the URL exists
        writeComment(request, config, data, cmt, config['blog_encoding'])
               
        return "success pinging %s from %s\n" % (target, source)
    else:
        raise Fault(0x0011, "%s does not point to %s" % (source, target))
Example #49
0
def cb_handle(args):
    """

    @param args: a dict of plugin arguments
    @type args: dict
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    urltrigger = config.get('trackback_urltrigger', '/trackback')

    logger = tools.getLogger()

    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.getResponse()
        response.addHeader("Content-type", "text/xml")

        form = request.getForm()

        message = "A trackback must have at least a URL field (see http://www.sixapart.com/pronet/docs/trackback_spec )"

        if form.has_key("url"):
            from comments import decode_form
            encoding = config.get('blog_encoding', 'iso-8859-1')
            decode_form(form, encoding)
            import time
            cdict = {
                'title': form.getvalue('title', ''),
                'author': form.getvalue('blog_name', ''),
                'pubDate': str(time.time()),
                'link': form['url'].value,
                'source': form.getvalue('blog_name', ''),
                'description': form.getvalue('excerpt', ''),
                'ipaddress': pyhttp.get('REMOTE_ADDR', ''),
                'type': 'trackback'
            }
            argdict = {"request": request, "comment": cdict}
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x: x != 0)
            if ((isinstance(reject, tuple) or isinstance(reject, list))
                    and len(reject) == 2):
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1

            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.pyblosxom import Request
            from Pyblosxom.pyblosxom import PyBlosxom

            datadir = config['datadir']

            from comments import writeComment
            try:
                import os
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                # Format Author
                cdict['author'] = 'Trackback from %s' % form.getvalue(
                    'blog_name', '')
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message

        else:
            logger.error(message)
            print >> response, tb_bad_response % message

        # no further handling is needed
        return 1
    else:
        return 0
Example #50
0
def cb_handle(args):
    request = args['request']
    pyhttp = request.get_http()
    config = request.get_configuration()

    urltrigger = config.get('trackback_urltrigger', '/trackback')

    logger = tools.get_logger()

    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.get_response()
        response.add_header("Content-type", "text/xml")

        form = request.get_form()

        message = ("A trackback must have at least a URL field (see "
                   "http://www.sixapart.com/pronet/docs/trackback_spec)")

        if "url" in form:
            from comments import decode_form
            encoding = config.get('blog_encoding', 'iso-8859-1')
            decode_form(form, encoding)
            import time
            cdict = {'title': form.getvalue('title', ''),
                     'author': form.getvalue('blog_name', ''),
                     'pubDate': str(time.time()),
                     'link': form['url'].value,
                     'source': form.getvalue('blog_name', ''),
                     'description': form.getvalue('excerpt', ''),
                     'ipaddress': pyhttp.get('REMOTE_ADDR', ''),
                     'type': 'trackback'
                     }
            argdict = {"request": request, "comment": cdict}
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x: x != 0)
            if isinstance(reject, (tuple, list)) and len(reject) == 2:
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."

            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1

            from Pyblosxom.entries.fileentry import FileEntry

            datadir = config['datadir']

            from comments import writeComment
            try:
                import os
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.get_data()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                # Format Author
                cdict['author'] = (
                    'Trackback from %s' % form.getvalue('blog_name', ''))
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message

        else:
            logger.error(message)
            print >> response, tb_bad_response % message

        # no further handling is needed
        return 1
    return 0
Example #52
0
def cb_handle(args):
    """
    This takes in a request and handles the request.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    urltrigger = config.get("xmlrpc_urltrigger", "/RPC")

    if os.environ.get("PATH_INFO", "").startswith(urltrigger):
        try:
            content_length = int(os.environ.get("CONTENT_LENGTH", "0"))
            maxrequest_length = config.get("xmlrpc_maxrequest_length", 10000)

            if content_length > maxrequest_length:
                raise ValueError('Request too large - %s bytes' % content_length)

        except:
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s: %s" % sys.exc_info()[:2]))
            resp_str = ('Content-Type: text/xml\n') + \
                       ('Content-Length: %d\n\n' % len(response)) + \
                       response

            sys.stdout.write(resp_str)
            sys.stdout.flush()
            return 1

        # everything is cool--so we handle the xmlrpc request

        data = sys.stdin.read(content_length)

        # here we call the xmlrpc_init callback passing in a dict.
        # each function that registers with this callback adds their
        # xmlrpc functions to the dict.
        args = tools.run_callback("xmlrpc_register", {
            "request": request,
            "methods": {}
        },
                                  mappingfunc=lambda x, y: y,
                                  defaultfunc=lambda x: x)
        methods = args["methods"]
        request.addData({'xmlrpc_methods': methods})
        """
        if os.environ.get("REQUEST_METHOD", "") == "GET":
            resp = []
            resp.append("Content-Type: text/plain")
            resp.append("\n")
            resp.append("-" * 40)
            resp.append("Methods Defined:")
            resp.append("-" * 40)
            for mem in methods.keys():
                resp.append("  %s -> %s" % (mem, methods[mem]))
            sys.stdout.write("\n".join(resp))
            return 1
        """

        x = XMLRPCHandler(request, data).process()

        # return 1 indicating we've handled the request
        return 1
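
Plugins publish XML-RPC methods by adding callables to args["methods"] in the xmlrpc_register chain; the resulting dict is what the XMLRPCHandler dispatches against. A minimal sketch (the method body is illustrative, not the real pingback implementation):

def pingback_ping(request, source, target):
    """Hypothetical XML-RPC method body; a real one would verify the
    source page and store the pingback as a comment."""
    return "pinged %s from %s" % (target, source)


def cb_xmlrpc_register(args):
    # register the method under its dotted XML-RPC name
    args["methods"].update({"pingback.ping": pingback_ping})
    return args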
Example #53
0
    def run_static_renderer(self, incremental=False):
        """This will go through all possible things in the blog and
        statically render everything to the ``static_dir`` specified
        in the config file.

        This figures out all the possible ``path_info`` settings and
        calls ``self.run()`` a bazillion times saving each file.

        :param incremental: Whether (True) or not (False) to
                            incrementally render the pages.  If we're
                            incrementally rendering pages, then we
                            render only the ones that have changed.
        """
        self.initialize()

        config = self._request.get_configuration()
        data = self._request.get_data()
        print "Performing static rendering."
        if incremental:
            print "Incremental is set."

        staticdir = config.get("static_dir", "")
        datadir = config["datadir"]

        if not staticdir:
            print "Error: You must set static_dir in your config file."
            return 0

        flavours = config.get("static_flavours", ["html"])

        renderme = []

        monthnames = config.get("static_monthnames", True)
        monthnumbers = config.get("static_monthnumbers", False)
        yearindexes = config.get("static_yearindexes", True)

        dates = {}
        categories = {}

        # first we handle entries and categories
        listing = tools.walk(self._request, datadir)

        for mem in listing:
            # skip the ones that have bad extensions
            ext = mem[mem.rfind(".")+1:]
            if not ext in data["extensions"].keys():
                continue

            # grab the mtime of the entry file
            mtime = time.mktime(tools.filestat(self._request, mem))

            # remove the datadir from the front and the bit at the end
            mem = mem[len(datadir):mem.rfind(".")]

            # this is the static filename
            fn = os.path.normpath(staticdir + mem)

            # grab the mtime of one of the statically rendered file
            try:
                smtime = os.stat(fn + "." + flavours[0])[8]
            except:
                smtime = 0

            # if the entry is more recent than the static, we want to
            # re-render
            if smtime < mtime or not incremental:

                # grab the categories
                temp = os.path.dirname(mem).split(os.sep)
                for i in range(len(temp)+1):
                    p = os.sep.join(temp[0:i])
                    categories[p] = 0

                # grab the date
                mtime = time.localtime(mtime)
                year = time.strftime("%Y", mtime)
                month = time.strftime("%m", mtime)
                day = time.strftime("%d", mtime)

                if yearindexes:
                    dates[year] = 1

                if monthnumbers:
                    dates[year + "/" + month] = 1
                    dates[year + "/" + month + "/" + day] = 1

                if monthnames:
                    monthname = tools.num2month[month]
                    dates[year + "/" + monthname] = 1
                    dates[year + "/" + monthname + "/" + day] = 1

                # toss in the render queue
                for f in flavours:
                    renderme.append( (mem + "." + f, "") )

        print "rendering %d entries." % len(renderme)

        # handle categories
        categories = categories.keys()
        categories.sort()

        # if they have stuff in their root category, it'll add a "/"
        # to the category list and we want to remove that because it's
        # a duplicate of "".
        if "/" in categories:
            categories.remove("/")

        print "rendering %d category indexes." % len(categories)

        for mem in categories:
            mem = os.path.normpath(mem + "/index.")
            for f in flavours:
                renderme.append((mem + f, ""))

        # now we handle dates
        dates = dates.keys()
        dates.sort()

        dates = ["/" + d for d in dates]

        print "rendering %d date indexes." % len(dates)

        for mem in dates:
            mem = os.path.normpath(mem + "/index.")
            for f in flavours:
                renderme.append((mem + f, ""))

        # now we handle arbitrary urls
        additional_stuff = config.get("static_urls", [])
        print "rendering %d arbitrary urls." % len(additional_stuff)

        for mem in additional_stuff:
            if mem.find("?") != -1:
                url = mem[:mem.find("?")]
                query = mem[mem.find("?")+1:]
            else:
                url = mem
                query = ""

            renderme.append((url, query))

        # now we pass the complete render list to all the plugins via
        # cb_staticrender_filelist and they can add to the filelist
        # any (url, query) tuples they want rendered.
        print "(before) building %s files." % len(renderme)
        tools.run_callback("staticrender_filelist",
                           {'request': self._request,
                            'filelist': renderme,
                            'flavours': flavours,
                            'incremental': incremental})

        renderme = sorted(set(renderme))

        print "building %s files." % len(renderme)

        for url, q in renderme:
            url = url.replace(os.sep, "/")
            print "rendering '%s' ..." % url

            tools.render_url_statically(config, url, q)

        # we're done, clean up
        self.cleanup()
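
As the comment in run_static_renderer notes, plugins get a last chance to extend the render queue through the staticrender_filelist chain; every entry is a (url, query) tuple. For example (the extra URL is illustrative):

def cb_staticrender_filelist(args):
    # queue one extra page per flavour; the static renderer treats
    # these (url, query) tuples like the ones it built itself
    filelist = args["filelist"]
    for flavour in args["flavours"]:
        filelist.append(("/about/index." + flavour, ""))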
Example #54
0
def cb_prepare(args):
    """
    Handle comment related HTTP POST's.

    @param request: pyblosxom request object
    @type request: a Pyblosxom request object
    """
    request = args["request"]
    form = request.get_http()['form']
    config = request.get_configuration()
    data = request.get_data()
    pyhttp = request.get_http()

    # first we check to see if we're going to print out comments
    # the default is not to show comments
    data['display_comment_default'] = False

    # check to see if they have "showcomments=yes" in the querystring
    qstr = pyhttp.get('QUERY_STRING', None)
    if qstr != None:
        parsed_qs = cgi.parse_qs(qstr)
        if 'showcomments' in parsed_qs:
            if parsed_qs['showcomments'][0] == 'yes':
                data['display_comment_default'] = True

    # check to see if the bl_type is "file"
    if "bl_type" in data and data["bl_type"] == "file":
        data["bl_type_file"] = "yes"
        data['display_comment_default'] = True

    # second, we check to see if they're posting a comment and we
    # need to write the comment to disk.
    posting = (('ajax' in form and form['ajax'].value == 'post')
               or not "preview" in form)
    if (("title" in form and "author" in form and "body" in form and posting)):

        entry = data.get("entry_list", [])
        if len(entry) == 0:
            data["rejected"] = True
            data["comment_message"] = "No such entry exists."
            return
        entry = entry[0]

        if check_comments_disabled(config, entry):
            data["rejected"] = True
            data["comment_message"] = "Comments for that entry are disabled."
            return

        encoding = config.get('blog_encoding', 'utf-8')
        decode_form(form, encoding)

        body = form['body'].value
        author = form['author'].value
        title = form['title'].value
        url = ('url' in form and [form['url'].value] or [''])[0]

        # sanitize incoming data
        body = sanitize(body)
        author = sanitize(author)
        title = sanitize(title)

        # it doesn't make sense to add nofollow to link here, but we should
        # escape it. If you don't like the link escaping, I'm not attached
        # to it.
        cmt_time = time.time()
        w3cdate = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(cmt_time))
        date = time.strftime('%a %d %b %Y', time.gmtime(cmt_time))
        cdict = {
            'title': title,
            'author': author,
            'pubDate': str(cmt_time),
            'w3cdate': w3cdate,
            'date': date,
            'link': massage_link(url),
            'source': '',
            'description': add_dont_follow(body, config)
        }

        keys = form.keys()
        keys = [
            k for k in keys
            if k not in ["title", "url", "author", "body", "description"]
        ]
        for k in keys:
            cdict[k] = form[k].value

        if 'email' in form:
            cdict['email'] = form['email'].value

        cdict['ipaddress'] = pyhttp.get('REMOTE_ADDR', '')

        # record the comment's timestamp, so we can extract it and send it
        # back alone, without the rest of the page, if the request was ajax.
        data['cmt_time'] = float(cdict['pubDate'])

        argdict = {"request": request, "comment": cdict}
        reject = tools.run_callback("comment_reject",
                                    argdict,
                                    donefunc=lambda x: x != 0)
        if (((isinstance(reject, tuple) or isinstance(reject, list))
             and len(reject) == 2)):
            reject_code, reject_message = reject
        else:
            reject_code, reject_message = reject, "Comment rejected."
        if reject_code == 1:
            data["comment_message"] = reject_message
            data["rejected"] = True
        else:
            data["comment_message"] = write_comment(request, config, data, \
                                                   cdict, encoding)
Example #55
0
    def run_static_renderer(self, incremental=False):
        """This will go through all possible things in the blog and
        statically render everything to the ``static_dir`` specified
        in the config file.

        This figures out all the possible ``path_info`` settings and
        calls ``self.run()`` a bazillion times saving each file.

        :param incremental: Whether (True) or not (False) to
                            incrementally render the pages.  If we're
                            incrementally rendering pages, then we
                            render only the ones that have changed.
        """
        self.initialize()

        config = self._request.get_configuration()
        data = self._request.get_data()
        print "Performing static rendering."
        if incremental:
            print "Incremental is set."

        static_dir = config.get("static_dir", "")
        data_dir = config["datadir"]

        if not static_dir:
            print "Error: You must set static_dir in your config file."
            return 0

        flavours = config.get("static_flavours", ["html"])
        index_flavours = config.get("static_index_flavours", ["html"])

        render_me = []

        month_names = config.get("static_monthnames", True)
        month_numbers = config.get("static_monthnumbers", False)
        year_indexes = config.get("static_yearindexes", True)

        dates = {}
        categories = {}

        # first we handle entries and categories
        listing = tools.walk(self._request, data_dir)

        for mem in listing:
            # skip the ones that have bad extensions
            ext = mem[mem.rfind(".") + 1:]
            if not ext in data["extensions"].keys():
                continue

            # grab the mtime of the entry file
            mtime = time.mktime(tools.filestat(self._request, mem))

            # remove the datadir from the front and the bit at the end
            mem = mem[len(data_dir):mem.rfind(".")]

            # this is the static filename
            fn = os.path.normpath(static_dir + mem)

            # grab the mtime of one of the statically rendered file
            try:
                smtime = os.stat(fn + "." + flavours[0])[8]
            except:
                smtime = 0

            # if the entry is more recent than the static, we want to
            # re-render
            if smtime < mtime or not incremental:

                # grab the categories
                temp = os.path.dirname(mem).split(os.sep)
                for i in range(len(temp) + 1):
                    p = os.sep.join(temp[0:i])
                    categories[p] = 0

                # grab the date
                mtime = time.localtime(mtime)
                year = time.strftime("%Y", mtime)
                month = time.strftime("%m", mtime)
                day = time.strftime("%d", mtime)

                if year_indexes:
                    dates[year] = 1

                if month_numbers:
                    dates[year + "/" + month] = 1
                    dates[year + "/" + month + "/" + day] = 1

                if month_names:
                    monthname = tools.num2month[month]
                    dates[year + "/" + monthname] = 1
                    dates[year + "/" + monthname + "/" + day] = 1

                # toss in the render queue
                for f in flavours:
                    render_me.append((mem + "." + f, ""))

        print "rendering %d entries." % len(render_me)

        # handle categories
        categories = categories.keys()
        categories.sort()

        # if they have stuff in their root category, it'll add a "/"
        # to the category list and we want to remove that because it's
        # a duplicate of "".
        if "/" in categories:
            categories.remove("/")

        print "rendering %d category indexes." % len(categories)

        for mem in categories:
            mem = os.path.normpath(mem + "/index.")
            for f in index_flavours:
                render_me.append((mem + f, ""))

        # now we handle dates
        dates = dates.keys()
        dates.sort()

        dates = ["/" + d for d in dates]

        print "rendering %d date indexes." % len(dates)

        for mem in dates:
            mem = os.path.normpath(mem + "/index.")
            for f in index_flavours:
                render_me.append((mem + f, ""))

        # now we handle arbitrary urls
        additional_stuff = config.get("static_urls", [])
        print "rendering %d arbitrary urls." % len(additional_stuff)

        for mem in additional_stuff:
            if mem.find("?") != -1:
                url = mem[:mem.find("?")]
                query = mem[mem.find("?") + 1:]
            else:
                url = mem
                query = ""

            render_me.append((url, query))

        # now we pass the complete render list to all the plugins via
        # cb_staticrender_filelist and they can add to the filelist
        # any (url, query) tuples they want rendered.
        print "(before) building %s files." % len(render_me)
        tools.run_callback(
            "staticrender_filelist", {
                'request': self._request,
                'filelist': render_me,
                'flavours': flavours,
                'incremental': incremental
            })

        render_me = sorted(set(render_me))

        print "building %s files." % len(render_me)

        for url, q in render_me:
            url = url.replace(os.sep, "/")
            print "rendering '%s' ..." % url

            tools.render_url_statically(dict(config), url, q)

        # we're done, clean up
        self.cleanup()
Example #56
0
def blosxom_handler(request):
    """This is the default blosxom handler.

    It calls the renderer callback to get a renderer.  If there is no
    renderer, it uses the blosxom renderer.

    It calls the pathinfo callback to process the path_info http
    variable.

    It calls the filelist callback to build a list of entries to
    display.

    It calls the prepare callback to do any additional preparation
    before rendering the entries.

    Then it tells the renderer to render the entries.

    :param request: the request object.
    """
    config = request.get_configuration()
    data = request.get_data()

    # go through the renderer callback to see if anyone else wants to
    # render.  this renderer gets stored in the data dict for
    # downstream processing.
    rend = tools.run_callback('renderer',
                              {'request': request},
                              donefunc=lambda x: x is not None,
                              defaultfunc=lambda x: None)

    if not rend:
        # get the renderer we want to use
        rend = config.get("renderer", "blosxom")

        # import the renderer
        rend = tools.importname("Pyblosxom.renderers", rend)

        # get the renderer object
        rend = rend.Renderer(request, config.get("stdoutput", sys.stdout))

    data['renderer'] = rend

    # generate the timezone variable
    data["timezone"] = time.tzname[time.localtime()[8]]

    # process the path info to determine what kind of blog entry(ies)
    # this is
    tools.run_callback("pathinfo",
                       {"request": request},
                       donefunc=lambda x:x != None,
                       defaultfunc=blosxom_process_path_info)

    # call the filelist callback to generate a list of entries
    data["entry_list"] = tools.run_callback(
        "filelist",
        {"request": request},
        donefunc=lambda x:x != None,
        defaultfunc=blosxom_file_list_handler)

    # figure out the blog-level mtime which is the mtime of the head
    # of the entry_list
    entry_list = data["entry_list"]
    if isinstance(entry_list, list) and len(entry_list) > 0:
        mtime = entry_list[0].get("mtime", time.time())
    else:
        mtime = time.time()
    mtime_tuple = time.localtime(mtime)
    mtime_gmtuple = time.gmtime(mtime)

    data["latest_date"] = time.strftime('%a, %d %b %Y', mtime_tuple)

    # Make sure we get proper 'English' dates when using standards
    loc = locale.getlocale(locale.LC_ALL)
    locale.setlocale(locale.LC_ALL, 'C')

    data["latest_w3cdate"] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                           mtime_gmtuple)
    data['latest_rfc822date'] = time.strftime('%a, %d %b %Y %H:%M GMT',
                                              mtime_gmtuple)

    # set the locale back
    locale.setlocale(locale.LC_ALL, loc)

    # we pass the request with the entry_list through the prepare
    # callback giving everyone a chance to transform the data.  the
    # request is modified in place.
    tools.run_callback("prepare", {"request": request})

    # now we pass the entry_list through the renderer
    entry_list = data["entry_list"]
    renderer = data['renderer']

    if renderer and not renderer.rendered:
        if entry_list:
            renderer.set_content(entry_list)
            # Log it as success
            tools.run_callback("logrequest",
                               {'filename':config.get('logfile',''),
                                'return_code': '200',
                                'request': request})
        else:
            renderer.add_header('Status', '404 Not Found')
            renderer.set_content(
                {'title': 'The page you are looking for is not available',
                 'body': 'Somehow I cannot find the page you want. ' +
                 'Go Back to <a href="%s">%s</a>?'
                 % (config["base_url"], config["blog_title"])})
            # Log it as failure
            tools.run_callback("logrequest",
                               {'filename':config.get('logfile',''),
                                'return_code': '404',
                                'request': request})
        renderer.render()

    elif not renderer:
        output = config.get('stdoutput', sys.stdout)
        output.write("Content-Type: text/plain\n\n" +
                     "There is something wrong with your setup.\n" +
                     "Check your config files and verify that your " +
                     "configuration is correct.\n")

    cache = tools.get_cache(request)
    if cache:
        cache.close()
Example #57
0
def writeComment(request, config, data, comment):
    """
    Write a comment
    
    @param config: dict containing pyblosxom config info
    @type  config: dict
    
    @param data: dict containing entry info
    @type  data: dict
    
    @param comment: dict containing comment info
    @type  comment: dict

    @return: The success or failure of creating the comment.
    @rtype: string
    """
    entry = data['entry_list'][0]
    cdir = os.path.join(config['comment_dir'], entry['absolute_path'])
    cdir = os.path.normpath(cdir)
    if not os.path.isdir(cdir):
        os.makedirs(cdir)
    cfn = os.path.join(
        cdir, entry['fn'] + "-" + comment['pubDate'] + "." +
        config['comment_draft_ext'])

    argdict = {"request": request, "comment": comment}
    reject = tools.run_callback("comment_reject",
                                argdict,
                                donefunc=lambda x: x)
    if reject == 1:
        return "Comment rejected."

    # write comment
    cfile = None
    try:
        cfile = open(cfn, "w")
    except:
        tools.log("Couldn't open comment file %s for writing" % cfn)
        return "Error: Couldn't open comment file for writing."
    else:
        pass

    def makeXMLField(name, field):
        return "<" + name + ">" + cgi.escape(field[name]) + "</" + name + ">\n"

    try:
        try:
            # if the description decodes as UTF-8, emit a UTF-8 declaration
            comment['description'].decode('utf-8')
            cfile.write('<?xml version="1.0" encoding="utf-8"?>\n')
        except:
            encoding = config.get('blog_encoding', 'iso-8859-1')
            cfile.write('<?xml version="1.0" encoding="%s"?>\n' % encoding)
        cfile.write("<item>\n")
        cfile.write(makeXMLField('title', comment))
        cfile.write(makeXMLField('author', comment))
        cfile.write(makeXMLField('link', comment))
        cfile.write(makeXMLField('source', comment))
        cfile.write(makeXMLField('pubDate', comment))
        cfile.write(makeXMLField('description', comment))
        cfile.write("</item>\n")
        cfile.close()
    except:
        tools.log("Error writing comment data for ", cfn)
        cfile.close()

    # write latest pickle
    latest = None
    latestFilename = os.path.join(config['comment_dir'], 'LATEST.cmt')
    try:
        latest = open(latestFilename, "w")
    except:
        tools.log("Couldn't open latest comment pickle for writing")
        return "Error: Couldn't open latest comment pickle for writing."
    else:
        modTime = float(comment['pubDate'])

    try:
        cPickle.dump(modTime, latest)
        latest.close()
    except (IOError):
        # should log or e-mail
        if latest:
            latest.close()
        return "Error: Problem dumping the pickle."

    # if the right config keys are set, notify by e-mail
    if config.has_key('comment_smtp_server') and config.has_key(
            'comment_smtp_to'):

        # import the formatdate function which is in a different
        # place in Python 2.3 and up.
        try:
            from email.Utils import formatdate
        except ImportError:
            from rfc822 import formatdate

        import smtplib
        author = escape_SMTP_commands(clean_author(comment['author']))
        description = escape_SMTP_commands(comment['description'])
        if comment.has_key('email'):
            email = comment['email']
        else:
            email = config.get('comment_smtp_from', "*****@*****.**")
        try:
            server = smtplib.SMTP(config['comment_smtp_server'])
            curl = config['base_url'] + '/' + entry['file_path']

            headers = []
            headers.append(("From", email))
            headers.append(("To", config["comment_smtp_to"]))
            headers.append(("Date", formatdate(modTime)))
            headers.append(("Subject", "write back by %s" % author))

            html = """%s<br />\n%s<br />\n<a href="%s">%s</a>\n""" % (
                description, cfn, curl, curl)
            message = createhtmlmail(html, headers)
            server.sendmail(from_addr=email,
                            to_addrs=config['comment_smtp_to'],
                            msg=message)
            server.quit()
        except:
            tools.log("Error sending mail: %s" % message)
            return "Error: Problem sending notification email."

    msg = "Success: Comment has been registered."
    if config["comment_draft_ext"] != config["comment_ext"]:
        msg = msg + "  Comment will not appear until it has been manually approved by the owner of this web-site."
    return msg
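
writeComment hands the notification message off to createhtmlmail(), which is not part of this listing. A rough stand-in that builds an HTML mail from the (header, value) pairs used above might look like this (simplified; the real helper may handle charsets and multipart bodies differently):

from email.MIMEText import MIMEText  # email.mime.text on Python 3


def createhtmlmail(html, headers):
    """Wrap the html string in a single-part HTML message, apply the
    (name, value) header pairs and return it as a string suitable for
    smtplib.SMTP.sendmail()."""
    msg = MIMEText(html, "html")
    for name, value in headers:
        msg[name] = value
    return msg.as_string()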