def test_returns_extension_if_file_has_extension(self):
    """what_ext picks out the right extension for files that exist."""
    ext_dir = self.get_ext_dir()
    # (stem, expected extension) pairs for files created in ext_dir
    cases = [("a", "txt"), ("b", "html"), ("espaƱol", "txt")]
    for stem, expected in cases:
        target = os.path.join(ext_dir, stem)
        self.eq_(tools.what_ext(["txt", "html"], target), expected)
def test_returns_extension_if_file_has_extension(self):
    """what_ext returns the matching extension for existing files."""
    self._setup()
    try:
        candidates = ["txt", "html"]
        assert tools.what_ext(candidates, os.path.join(self._d, "a")) == "txt"
        assert tools.what_ext(candidates, os.path.join(self._d, "b")) == "html"
    finally:
        self._teardown()
def test_returns_None_if_extension_not_present(self):
    """what_ext yields None when no candidate extension matches."""
    self._setup()
    try:
        target = os.path.join(self._d, "a")
        # neither an empty candidate list nor a non-matching one finds it
        assert tools.what_ext([], target) == None
        assert tools.what_ext(["html"], target) == None
    finally:
        self._teardown()
def connect_links(base_url, extensions, wikidir, body):
    """
    Looks for wiki links in [[topic]] and [[topic | desc]] format and
    expands them into HTML anchors.

    :param base_url: base url for generated links (a trailing slash
        is stripped)
    :param extensions: list of known entry extensions for what_ext
    :param wikidir: directory the wiki files live in
    :param body: text to scan for wiki links
    :returns: body with all resolvable wiki links expanded; links whose
        topic has no file in wikidir are left as-is
    """
    if base_url.endswith("/"):
        base_url = base_url[:-1]
    i = 0
    body2 = []
    for match in WIKILINK.finditer(body):
        # copy the text between the previous match and this one
        body2.append(body[i:match.span(0)[0]])
        text = match.group(1)
        if "|" in text:
            # split on the FIRST | only so descriptions may themselves
            # contain | (plain split() would raise ValueError here);
            # strip both halves so "[[topic | desc]]" doesn't leak
            # whitespace into the link text
            topic, desc = text.split("|", 1)
            topic = topic.strip()
            desc = desc.strip()
        else:
            topic, desc = (text, text)
        fn = os.path.join(wikidir, topic)
        ext = tools.what_ext(extensions, fn)
        if not ext:
            # no wiki file for this topic -- keep the original markup
            body2.append(match.group(0))
            i = match.span(0)[1]
            continue
        body2.append("<a href=\"%s/%s/%s\">%s</a>" % \
                     (base_url, TRIGGER, topic, desc))
        i = match.span(0)[1]
    # tail after the last match
    body2.append(body[i:])
    return "".join(body2)
def cb_filelist(args):
    """
    Serve a static page when PATH_INFO begins with the trigger: find
    the named file under staticdir, evaluate python code blocks in it,
    and return it as a single-entry list.

    :param args: dict holding the Request object under "request"
    :returns: [FileEntry] on success, [] when the page is missing,
        None when the url is not ours
    """
    req = args["request"]
    pyhttp = req.getHttp()
    data = req.getData()
    config = req.getConfiguration()
    # not a static-page url -- let other handlers have it
    if not pyhttp["PATH_INFO"].startswith("/" + TRIGGER):
        return
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    # staticdir defaults to the datadir; normalize separators and
    # guarantee a trailing separator
    staticdir = config.get("staticdir", config['datadir'])
    staticdir = staticdir.replace("/", os.sep)
    if not staticdir[-1] == os.sep:
        staticdir = staticdir + os.sep
    # everything after the trigger is the page name
    page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER)+1:]
    if not page_name:
        return
    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")
    if not page_name:
        return
    if page_name[-1] == os.sep:
        page_name = page_name[:-1]
    # keep only the last path component
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]
    # if the page has a flavour, we use that. otherwise
    # we default to the static flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]
    # we build our own config dict for the fileentry to kind of
    # fake it into loading this file correctly rather than
    # one of the entries.
    newdatadir = staticdir
    # config["datadir"] = newdatadir
    ext = tools.what_ext(data["extensions"].keys(), staticdir + page_name)
    if not ext:
        # no file with a known extension -- nothing to serve
        return []
    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    filename = staticdir + page_name + "." + ext
    if not os.path.isfile(filename):
        return []
    fe = FileEntry(req, filename, staticdir)
    # now we evaluate python code blocks
    body = fe.getData()
    body = eval_python_blocks(req, body)
    body = "<!-- STATIC PAGE START -->\n\n" + body + "<!-- STATIC PAGE END -->\n"
    fe.setData(body)
    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "static"
    data['blog_title_with_path'] = config.get("blog_title", "") + " : " + fe.get("title", "")
    # set the datadir back
    config["datadir"] = datadir
    return [fe]
def cb_filelist(args):
    """
    Serve registry urls: dispatch queue and submit urls to their
    handlers, otherwise return either a listing of registry entries or
    a single registry entry.

    :param args: dict holding the Request object under "request"
    :returns: a list of entries, or None when the url is not ours
    """
    global registrydir, TRIGGER, SUBMITTRIGGER
    request = args["request"]
    pyhttp = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()
    form = pyhttp["form"]
    if not pyhttp["PATH_INFO"].startswith(TRIGGER):
        return
    data[INIT_KEY] = 1
    data['root_datadir'] = config['datadir']
    # Get our URL and configure the base_url param
    if pyhttp.has_key('SCRIPT_NAME'):
        if not config.has_key('base_url'):
            config['base_url'] = 'http://%s%s' % (pyhttp['HTTP_HOST'], pyhttp['SCRIPT_NAME'])
    else:
        config['base_url'] = config.get('base_url', '')
    config['base_url'] = config['base_url'] + TRIGGER
    # if they haven't add a registrydir to their config file,
    # we pleasantly error out
    if not config.has_key("registry_dir"):
        output = "<p>\"registry_dir\" config setting is not set. Refer to documentation.</p>"
        return [generate_entry(request, output, "setup error")]
    registrydir = config["registry_dir"]
    # make sure the registrydir has a / at the end
    if registrydir[-1] != os.sep:
        registrydir = registrydir + os.sep
    # if they are doing the queue thing, then we spin them off to queue
    # stuff.
    if pyhttp["PATH_INFO"].startswith(QUEUETRIGGER):
        # queued entries use the "txt-" extension; alias its handler
        data["extensions"]["txt-"] = data["extensions"]["txt"]
        return handle_registry_queue(args)
    # if they are doing the submit thing, then we spin them off to
    # the submit stuff.
    if pyhttp["PATH_INFO"].startswith(SUBMITTRIGGER):
        return handle_registry_submit(args)
    # check if we're looking for a listing of all entries
    if pyhttp["PATH_INFO"] == TRIGGER:
        entries = tools.Walk(request, registrydir)
    else:
        # path after the trigger: either a category dir or one entry,
        # possibly with a flavour extension tacked on
        dir2 = pyhttp["PATH_INFO"][len(TRIGGER):]
        filename, ext = os.path.splitext(dir2)
        if os.path.isdir(registrydir + filename):
            entries = tools.Walk(request, registrydir + filename)
        else:
            fn = registrydir + filename[1:]
            fext = tools.what_ext(data["extensions"].keys(), fn)
            entries = [fn + "." + fext]
        if ext[1:]:
            data["flavour"] = ext[1:]
    # that entry doesn't exist....
    if len(entries) == 0:
        output = "<p>No entries of that kind registered here.</p>"
        return [generate_entry(request, output)]
    # if we're looking at a specific entry....
    if len(entries) == 1:
        try:
            entry = fileentry.FileEntry(request, entries[0], registrydir, registrydir)
            # flavour packages get their own template
            if entries[0].find("flavours") != -1:
                entry["template_name"] = "flavour-story"
            else:
                entry["template_name"] = "registry-story"
            if entry.has_key("contrib"):
                entry["body"] = entry["body"] + CONTRIB_DESC
            return [entry]
        except Exception, e:
            # FileEntry construction failed -- entry file not readable
            output = "That plugin does not exist."
            return [generate_entry(request, output)]
def cb_filelist(args):
    """
    Serve registry urls: return either a listing of registry entries
    (for a directory or an .../index url) or a single registry entry.

    :param args: dict holding the Request object under "request"
    :returns: a list of entries, or None when the url is not ours
    """
    global registrydir, TRIGGER
    request = args["request"]
    pyhttp = request.get_http()
    data = request.get_data()
    config = request.get_configuration()
    if not pyhttp["PATH_INFO"].startswith(TRIGGER):
        return
    data[INIT_KEY] = 1
    data['root_datadir'] = config['datadir']
    # if they haven't add a registrydir to their config file, we
    # pleasantly error out
    if not config.has_key("registry_dir"):
        output = ("<p>\"registry_dir\" config setting is not set. "
                  "Refer to documentation.</p>")
        return [generate_entry(request, output, "setup error")]
    registrydir = config["registry_dir"]
    # make sure the registrydir has a / at the end
    if registrydir[-1] != os.sep:
        registrydir = registrydir + os.sep
    pathinfo = pyhttp["PATH_INFO"]
    dir2 = pathinfo[len(TRIGGER):]
    if dir2.startswith("/"):
        dir2 = dir2[1:]
    # a trailing extension on the url, if any, is a flavour
    filename, ext = os.path.splitext(dir2)
    fullpath = os.path.join(registrydir, filename)
    if os.path.isdir(fullpath):
        entries = tools.Walk(request, fullpath)
    elif fullpath.endswith("index") and os.path.isdir(fullpath[:-len("index")]):
        # BUGFIX: strip "index" from the END of the path before the
        # isdir check; the previous fullpath[:len("index")] tested the
        # first five characters of the path instead, so .../index urls
        # never matched this branch.
        entries = tools.Walk(request, fullpath[:-len("index")])
    else:
        fext = tools.what_ext(data["extensions"].keys(), fullpath)
        entries = [fullpath + "." + fext]
    if ext[1:]:
        data["flavour"] = ext[1:]
    # that entry doesn't exist....
    if len(entries) == 0:
        output = "<p>No entries of that kind registered here.</p>"
        return [generate_entry(request, output)]
    # if we're looking at a specific entry....
    if len(entries) == 1:
        try:
            entry = build_entry(request, entries[0], registrydir, False)
            # flavour packages get their own template
            if entries[0].find("/flavours/") != -1:
                entry["template_name"] = "flavour-story"
            else:
                entry["template_name"] = "registry-story"
            return [entry]
        except Exception:
            output = "<p>That plugin does not exist.</p>"
            return [generate_entry(request, output)]
    return get_entries_by_item(request, registrydir, entries, "path")
def cb_filelist(args):
    """
    Serve a static page when PATH_INFO begins with the trigger: find
    the named file under staticdir, evaluate python code blocks in it,
    and return it as a single-entry list.

    :param args: dict holding the Request object under "request"
    :returns: [FileEntry] on success, [] when the page is missing,
        None when the url is not ours
    """
    req = args["request"]
    pyhttp = req.getHttp()
    data = req.getData()
    config = req.getConfiguration()
    # not a static-page url -- let other handlers have it
    if not pyhttp["PATH_INFO"].startswith("/" + TRIGGER):
        return
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    # staticdir defaults to the datadir; normalize separators and
    # guarantee a trailing separator
    staticdir = config.get("staticdir", config['datadir'])
    staticdir = staticdir.replace("/", os.sep)
    if not staticdir[-1] == os.sep:
        staticdir = staticdir + os.sep
    # everything after the trigger is the page name
    page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER) + 1:]
    if not page_name:
        return
    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")
    if not page_name:
        return
    if page_name[-1] == os.sep:
        page_name = page_name[:-1]
    # keep only the last path component
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]
    # if the page has a flavour, we use that. otherwise
    # we default to the static flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]
    # we build our own config dict for the fileentry to kind of
    # fake it into loading this file correctly rather than
    # one of the entries.
    newdatadir = staticdir
    # config["datadir"] = newdatadir
    ext = tools.what_ext(data["extensions"].keys(), staticdir + page_name)
    if not ext:
        # no file with a known extension -- nothing to serve
        return []
    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    filename = staticdir + page_name + "." + ext
    if not os.path.isfile(filename):
        return []
    fe = FileEntry(req, filename, staticdir)
    # now we evaluate python code blocks
    body = fe.getData()
    body = eval_python_blocks(req, body)
    body = "<!-- STATIC PAGE START -->\n\n" + body + "<!-- STATIC PAGE END -->\n"
    fe.setData(body)
    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "static"
    data['blog_title_with_path'] = config.get(
        "blog_title", "") + " : " + fe.get("title", "")
    # set the datadir back
    config["datadir"] = datadir
    return [fe]
def cb_filelist(args):
    """
    This handles kicking off wbgwiki functionality if we see a url
    that we handle.

    Looks up the requested wiki page under wikidir, refuses paths that
    escape wikidir, evaluates python code blocks and wiki links in the
    page body, and returns the page as a single-entry list.

    :param args: dict holding the Request object under "request"
    :returns: [FileEntry] on success, [] on a bad/missing wiki file,
        None when the url is not ours
    """
    req = args["request"]
    pyhttp = req.getHttp()
    config = req.getConfiguration()
    pathinfo = pyhttp["PATH_INFO"]
    if not pathinfo.startswith("/" + TRIGGER):
        return
    logger = tools.getLogger()
    data = req.getData()
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    wikidir = config.get("wikidir", config['datadir'])
    # convert the / to os.sep so that we can use os.path stuff.
    wikidir = wikidir.replace("/", os.sep)
    if not wikidir.endswith(os.sep):
        wikidir = wikidir + os.sep
    # everything after the trigger is the page name
    page_name = pathinfo[len("/" + TRIGGER) + 1:]
    if not page_name:
        return
    page_name = page_name.replace("/", os.sep)
    if not page_name:
        return
    if page_name.endswith(os.sep):
        page_name = page_name[:-1]
    # if the page has a flavour, we use that. otherwise
    # we default to the wiki flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]
    # wikifile should hold the absolute path on the file system to
    # the wiki file we're looking at. if it's in a parent directory
    # of wikidir, then we abort.
    wikifile = os.path.normpath(os.path.join(wikidir, page_name))
    if not wikifile.startswith(wikidir):
        logger.info("wiki file requested '%s' is not in wikidir." % wikifile)
        return []
    # we build our own config dict for the fileentry to kind of
    # fake it into loading this file correctly rather than
    # one of the entries.
    newdatadir = wikidir
    ext = tools.what_ext(data["extensions"].keys(), wikifile)
    if not ext:
        logger.info("wiki file '%s' does not exist." % wikifile)
        return []
    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    wikifile = wikifile + "." + ext
    if not os.path.isfile(wikifile):
        return []
    fe = FileEntry(req, wikifile, wikidir)
    # now we evaluate python code blocks
    body = fe.getData()
    body = eval_python_blocks(req, body)
    body = "<!-- STATIC PAGE START -->\n\n%s\n<!-- STATIC PAGE END -->\n" % body
    # now we evaluate for wikilinks
    body = connect_links(config["base_url"],
                         data["extensions"].keys(),
                         wikidir,
                         body)
    fe.setData(body)
    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "wiki"
    data['blog_title_with_path'] = "%s : %s" % \
        (config.get("blog_title", ""), fe.get("title_escaped", ""))
    # set the datadir back
    config["datadir"] = datadir
    return [fe]
def cb_filelist(args):
    """
    This handles kicking off wbgwiki functionality if we see a url
    that we handle.

    Looks up the requested wiki page under wikidir, refuses paths that
    escape wikidir, evaluates python code blocks and wiki links in the
    page body, and returns the page as a single-entry list.

    :param args: dict holding the Request object under "request"
    :returns: [FileEntry] on success, [] on a bad/missing wiki file,
        None when the url is not ours
    """
    req = args["request"]
    pyhttp = req.getHttp()
    config = req.getConfiguration()
    pathinfo = pyhttp["PATH_INFO"]
    if not pathinfo.startswith("/" + TRIGGER):
        return
    logger = tools.getLogger()
    data = req.getData()
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    wikidir = config.get("wikidir", config['datadir'])
    # convert the / to os.sep so that we can use os.path stuff.
    wikidir = wikidir.replace("/", os.sep)
    if not wikidir.endswith(os.sep):
        wikidir = wikidir + os.sep
    # everything after the trigger is the page name
    page_name = pathinfo[len("/" + TRIGGER)+1:]
    if not page_name:
        return
    page_name = page_name.replace("/", os.sep)
    if not page_name:
        return
    if page_name.endswith(os.sep):
        page_name = page_name[:-1]
    # if the page has a flavour, we use that. otherwise
    # we default to the wiki flavour
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]
    # wikifile should hold the absolute path on the file system to
    # the wiki file we're looking at. if it's in a parent directory
    # of wikidir, then we abort.
    wikifile = os.path.normpath(os.path.join(wikidir, page_name))
    if not wikifile.startswith(wikidir):
        logger.info("wiki file requested '%s' is not in wikidir." % wikifile)
        return []
    # we build our own config dict for the fileentry to kind of
    # fake it into loading this file correctly rather than
    # one of the entries.
    newdatadir = wikidir
    ext = tools.what_ext(data["extensions"].keys(), wikifile)
    if not ext:
        logger.info("wiki file '%s' does not exist." % wikifile)
        return []
    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    wikifile = wikifile + "." + ext
    if not os.path.isfile(wikifile):
        return []
    fe = FileEntry(req, wikifile, wikidir)
    # now we evaluate python code blocks
    body = fe.getData()
    body = eval_python_blocks(req, body)
    body = "<!-- STATIC PAGE START -->\n\n%s\n<!-- STATIC PAGE END -->\n" % body
    # now we evaluate for wikilinks
    body = connect_links(config["base_url"],
                         data["extensions"].keys(),
                         wikidir,
                         body)
    fe.setData(body)
    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "wiki"
    data['blog_title_with_path'] = "%s : %s" % \
        (config.get("blog_title", ""), fe.get("title_escaped", ""))
    # set the datadir back
    config["datadir"] = datadir
    return [fe]
def cb_handle(args):
    """
    Handle an incoming trackback ping posted to the trackback
    urltrigger: write the ping as a comment on the targeted entry and
    print a trackback xml response on stdout (old CGI-style API).

    @param args: a dict of plugin arguments
    @type args: dict
    @returns: 1 when the url was ours (handled), 0 otherwise
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()
    urltrigger = config.get('trackback_urltrigger','/trackback')
    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        print "Content-type: text/xml"
        print
        form = cgi.FieldStorage()
        message = "not trackback"
        # a valid ping carries all four of these form fields
        if form.has_key("title") and form.has_key("excerpt") and \
           form.has_key("url") and form.has_key("blog_name"):
            import time
            # build the comment dict writeComment expects
            cdict = { 'title': form['title'].value, \
                      'author': 'Trackback from %s' % form['blog_name'].value, \
                      'pubDate' : str(time.time()), \
                      'link' : form['url'].value, \
                      'source' : form['blog_name'].value, \
                      'description' : form['excerpt'].value }
            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.Request import Request
            from Pyblosxom.pyblosxom import PyBlosxom
            datadir = config['datadir']
            from comments import writeComment
            try:
                import os
                # map the url path after the trigger onto the datadir
                pi = path_info.replace(urltrigger,'')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir )
                data = {}
                data['entry_list'] = [ entry ]
                writeComment(config, data, cdict)
                print tb_good_response
            except OSError:
                message = 'URI '+path_info+" doesn't exist"
                tools.log(message)
                print tb_bad_response % message
        else:
            # missing fields -- report the failure in the xml response
            tools.log(message)
            print tb_bad_response % message
        import sys
        sys.stdout.flush()
        # no further handling is needed
        return 1
    else:
        return 0
def test_returns_None_if_extension_not_present(self):
    """what_ext returns None when no candidate extension matches."""
    ext_dir = self.get_ext_dir()
    target = os.path.join(ext_dir, "a")
    for candidates in ([], ["html"]):
        self.eq_(tools.what_ext(candidates, target), None)
def cb_handle(args):
    """
    Handle an incoming trackback ping: validate the posted form, let
    plugins veto it through the trackback_reject callback, then store
    the ping as a comment and emit a trackback xml response.

    @param args: a dict of plugin arguments
    @type args: dict
    @returns: 1 when the url was ours (handled), 0 otherwise
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()
    urltrigger = config.get('trackback_urltrigger','/trackback')
    logger = tools.getLogger()
    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.getResponse()
        response.addHeader("Content-type", "text/xml")
        form = request.getForm()
        message = "A trackback must have at least a URL field (see http://www.sixapart.com/pronet/docs/trackback_spec )"
        # only the url field is mandatory per the trackback spec
        if form.has_key("url"):
            from comments import decode_form
            decode_form(form, config['blog_encoding'])
            import time
            # build the comment dict writeComment expects
            cdict = { 'title': form.getvalue('title', ''), \
                      'author': 'Trackback from %s' % form.getvalue('blog_name', ''), \
                      'pubDate' : str(time.time()), \
                      'link' : form['url'].value, \
                      'source' : form.getvalue('blog_name', ''), \
                      'description' : form.getvalue('excerpt', '') }
            argdict = { "request": request, "comment": cdict }
            # plugins may return a code or a (code, message) pair
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x:x != 0)
            if ((isinstance(reject, tuple) or isinstance(reject, list))
                    and len(reject) == 2):
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1
            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.pyblosxom import Request
            from Pyblosxom.pyblosxom import PyBlosxom
            datadir = config['datadir']
            from comments import writeComment
            try:
                import os
                # map the url path after the trigger onto the datadir
                pi = path_info.replace(urltrigger,'')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir )
                data = {}
                data['entry_list'] = [ entry ]
                writeComment(request, config, data, cdict, config['blog_encoding'])
                print >> response, tb_good_response
            except OSError:
                message = 'URI '+path_info+" doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message
        else:
            # no url field -- reject with the explanatory message
            logger.error(message)
            print >> response, tb_bad_response % message
        # no further handling is needed
        return 1
    else:
        return 0
def cb_handle(args):
    """
    Handle an incoming trackback ping: validate the posted form, let
    plugins veto it through the trackback_reject callback, then store
    the ping as a comment and emit a trackback xml response.

    :param args: a dict of plugin arguments with the Request object
        under "request"
    :returns: 1 when the url was ours (handled), 0 otherwise
    """
    request = args['request']
    pyhttp = request.get_http()
    config = request.get_configuration()
    urltrigger = config.get('trackback_urltrigger', '/trackback')
    logger = tools.get_logger()
    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.get_response()
        response.add_header("Content-type", "text/xml")
        form = request.get_form()
        message = ("A trackback must have at least a URL field (see "
                   "http://www.sixapart.com/pronet/docs/trackback_spec)")
        # only the url field is mandatory per the trackback spec
        if "url" in form:
            from comments import decode_form
            encoding = config.get('blog_encoding', 'iso-8859-1')
            decode_form(form, encoding)
            import time
            # build the comment dict writeComment expects; author is
            # reformatted just before writing, below
            cdict = {'title': form.getvalue('title', ''),
                     'author': form.getvalue('blog_name', ''),
                     'pubDate': str(time.time()),
                     'link': form['url'].value,
                     'source': form.getvalue('blog_name', ''),
                     'description': form.getvalue('excerpt', ''),
                     'ipaddress': pyhttp.get('REMOTE_ADDR', ''),
                     'type': 'trackback'
                     }
            argdict = {"request": request, "comment": cdict}
            # plugins may return a code or a (code, message) pair
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x: x != 0)
            if isinstance(reject, (tuple, list)) and len(reject) == 2:
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1
            from Pyblosxom.entries.fileentry import FileEntry
            datadir = config['datadir']
            from comments import writeComment
            try:
                import os
                # map the url path after the trigger onto the datadir
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.get_data()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                # Format Author
                cdict['author'] = (
                    'Trackback from %s' % form.getvalue('blog_name', ''))
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message
        else:
            # no url field -- reject with the explanatory message
            logger.error(message)
            print >> response, tb_bad_response % message
        # no further handling is needed
        return 1
    return 0
def cb_filelist(args):
    """
    Serve a "pages" page: map the url (or the front page) onto a file
    under pagesdir, evaluate python code blocks in it, and return it
    as a single-entry list.

    :param args: dict holding the Request object under "request"
    :returns: [FileEntry] on success, [] when the page is missing,
        None when the url is not ours
    """
    req = args["request"]
    pyhttp = req.get_http()
    data = req.get_data()
    config = req.get_configuration()
    page_name = None
    if not (is_trigger(pyhttp, config) or is_frontpage(pyhttp, config)):
        return
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    # normalize pagesdir separators and guarantee a trailing separator
    pagesdir = config["pagesdir"]
    pagesdir = pagesdir.replace("/", os.sep)
    if not pagesdir[-1] == os.sep:
        pagesdir = pagesdir + os.sep
    pathinfo = pyhttp.get("PATH_INFO", "")
    path, ext = os.path.splitext(pathinfo)
    if pathinfo == "/" or path == "/index":
        # root / index urls are served from the "frontpage" page
        page_name = "frontpage"
    else:
        page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER) + 1:]
    if not page_name:
        return
    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")
    if not page_name:
        return
    if page_name[-1] == os.sep:
        page_name = page_name[:-1]
    # keep only the last path component
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]
    # if the page has a flavour, we use that. otherwise
    # we default to the default flavour.
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]
    ext = tools.what_ext(data["extensions"].keys(), pagesdir + page_name)
    if not ext:
        # no file with a known extension -- nothing to serve
        return []
    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    filename = pagesdir + page_name + "." + ext
    if not os.path.isfile(filename):
        return []
    fe = FileEntry(req, filename, pagesdir)
    # now we evaluate python code blocks
    body = fe.get_data()
    body = eval_python_blocks(req, body)
    body = ("<!-- PAGES PAGE START -->\n\n" + body
            + "<!-- PAGES PAGE END -->\n")
    fe.set_data(body)
    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "pages"
    data['blog_title_with_path'] = (config.get("blog_title", "")
                                    + " : " + fe.get("title", ""))
    # set the datadir back
    config["datadir"] = datadir
    return [fe]
def cb_handle(args):
    """
    Handle an incoming trackback ping: validate the posted form, let
    plugins veto it through the trackback_reject callback, then store
    the ping as a comment and emit a trackback xml response.

    @param args: a dict of plugin arguments
    @type args: dict
    @returns: 1 when the url was ours (handled), 0 otherwise
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()
    urltrigger = config.get('trackback_urltrigger', '/trackback')
    logger = tools.getLogger()
    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        response = request.getResponse()
        response.addHeader("Content-type", "text/xml")
        form = request.getForm()
        message = "A trackback must have at least a URL field (see http://www.sixapart.com/pronet/docs/trackback_spec )"
        # only the url field is mandatory per the trackback spec
        if form.has_key("url"):
            from comments import decode_form
            encoding = config.get('blog_encoding', 'iso-8859-1')
            decode_form(form, encoding)
            import time
            # build the comment dict writeComment expects; author is
            # reformatted just before writing, below
            cdict = { 'title': form.getvalue('title', ''),
                      'author': form.getvalue('blog_name', ''),
                      'pubDate': str(time.time()),
                      'link': form['url'].value,
                      'source': form.getvalue('blog_name', ''),
                      'description': form.getvalue('excerpt', ''),
                      'ipaddress': pyhttp.get('REMOTE_ADDR', ''),
                      'type': 'trackback'
                      }
            argdict = {"request": request, "comment": cdict}
            # plugins may return a code or a (code, message) pair
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x: x != 0)
            if ((isinstance(reject, tuple) or isinstance(reject, list))
                    and len(reject) == 2):
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1
            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.pyblosxom import Request
            from Pyblosxom.pyblosxom import PyBlosxom
            datadir = config['datadir']
            from comments import writeComment
            try:
                import os
                # map the url path after the trigger onto the datadir
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                # Format Author
                cdict['author'] = 'Trackback from %s' % form.getvalue(
                    'blog_name', '')
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message
        else:
            # no url field -- reject with the explanatory message
            logger.error(message)
            print >> response, tb_bad_response % message
        # no further handling is needed
        return 1
    else:
        return 0
def cb_handle(args):
    """
    Handle an incoming trackback ping posted to the trackback
    urltrigger: write the ping as a comment on the targeted entry and
    print a trackback xml response on stdout (old CGI-style API).

    @param args: a dict of plugin arguments
    @type args: dict
    @returns: 1 when the url was ours (handled), 0 otherwise
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()
    urltrigger = config.get('trackback_urltrigger', '/trackback')
    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        print "Content-type: text/xml"
        print
        form = cgi.FieldStorage()
        message = "not trackback"
        # a valid ping carries all four of these form fields
        if form.has_key("title") and form.has_key("excerpt") and \
           form.has_key("url") and form.has_key("blog_name"):
            import time
            # build the comment dict writeComment expects
            cdict = { 'title': form['title'].value, \
                      'author': 'Trackback from %s' % form['blog_name'].value, \
                      'pubDate' : str(time.time()), \
                      'link' : form['url'].value, \
                      'source' : form['blog_name'].value, \
                      'description' : form['excerpt'].value }
            from Pyblosxom.entries.fileentry import FileEntry
            from Pyblosxom.Request import Request
            from Pyblosxom.pyblosxom import PyBlosxom
            datadir = config['datadir']
            from comments import writeComment
            try:
                import os
                # map the url path after the trigger onto the datadir
                pi = path_info.replace(urltrigger, '')
                path = os.path.join(datadir, pi[1:])
                data = request.getData()
                ext = tools.what_ext(data['extensions'].keys(), path)
                entry = FileEntry(request, '%s.%s' % (path, ext), datadir)
                data = {}
                data['entry_list'] = [entry]
                writeComment(config, data, cdict)
                print tb_good_response
            except OSError:
                message = 'URI ' + path_info + " doesn't exist"
                tools.log(message)
                print tb_bad_response % message
        else:
            # missing fields -- report the failure in the xml response
            tools.log(message)
            print tb_bad_response % message
        import sys
        sys.stdout.flush()
        # no further handling is needed
        return 1
    else:
        return 0
def cb_filelist(args):
    """
    Serve a "pages" page: map the url (or the front page) onto a file
    under pagesdir, evaluate python code blocks in it, and return it
    as a single-entry list.

    :param args: dict holding the Request object under "request"
    :returns: [FileEntry] on success, [] when the page is missing,
        None when the url is not ours
    """
    req = args["request"]
    pyhttp = req.get_http()
    data = req.get_data()
    config = req.get_configuration()
    page_name = None
    if not (is_trigger(pyhttp, config) or is_frontpage(pyhttp, config)):
        return
    data[INIT_KEY] = 1
    datadir = config["datadir"]
    data['root_datadir'] = config['datadir']
    # normalize pagesdir separators and guarantee a trailing separator
    pagesdir = config["pagesdir"]
    pagesdir = pagesdir.replace("/", os.sep)
    if not pagesdir[-1] == os.sep:
        pagesdir = pagesdir + os.sep
    pathinfo = pyhttp.get("PATH_INFO", "")
    path, ext = os.path.splitext(pathinfo)
    if pathinfo == "/" or path == "/index":
        # root / index urls are served from the "frontpage" page
        page_name = "frontpage"
    else:
        page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER) + 1:]
    if not page_name:
        return
    # FIXME - need to do a better job of sanitizing
    page_name = page_name.replace(os.sep, "/")
    if not page_name:
        return
    if page_name[-1] == os.sep:
        page_name = page_name[:-1]
    # keep only the last path component
    if page_name.find("/") > 0:
        page_name = page_name[page_name.rfind("/"):]
    # if the page has a flavour, we use that. otherwise
    # we default to the default flavour.
    page_name, flavour = os.path.splitext(page_name)
    if flavour:
        data["flavour"] = flavour[1:]
    ext = tools.what_ext(data["extensions"].keys(), pagesdir + page_name)
    if not ext:
        # no file with a known extension -- nothing to serve
        return []
    data['root_datadir'] = page_name + '.' + ext
    data['bl_type'] = 'file'
    filename = pagesdir + page_name + "." + ext
    if not os.path.isfile(filename):
        return []
    fe = FileEntry(req, filename, pagesdir)
    # now we evaluate python code blocks
    body = fe.get_data()
    body = eval_python_blocks(req, body)
    body = ("<!-- PAGES PAGE START -->\n\n" + body
            + "<!-- PAGES PAGE END -->\n")
    fe.set_data(body)
    fe["absolute_path"] = TRIGGER
    fe["fn"] = page_name
    fe["file_path"] = TRIGGER + "/" + page_name
    fe["template_name"] = "pages"
    data['blog_title_with_path'] = (
        config.get("blog_title", "") + " : " + fe.get("title", ""))
    # set the datadir back
    config["datadir"] = datadir
    return [fe]
def cb_handle(args):
    """
    Handle a CommentAPI posting: read an RSS item from stdin, map the
    url after the urltrigger onto an entry in the datadir, and store
    the item as a comment on that entry (CGI-style: responses are
    printed to stdout, fatal problems abort via sys.exit).

    @param args: a dict of plugin arguments
    @type args: dict
    """
    request = args['request']
    pyhttp = request.getHttp()
    config = request.getConfiguration()
    urltrigger = config.get('commentAPI_urltrigger','/commentAPI')
    path_info = pyhttp['PATH_INFO']
    if path_info.startswith(urltrigger):
        try:
            from Pyblosxom.entries.fileentry import FileEntry
            import os, sys
            # the path after the trigger names the target entry
            pi = path_info.replace(urltrigger,'')
            if pi == '':
                sys.exit("<html><body>CommentAPI.cgi expects to receive an RSS item on standard input</body></html>")
            datadir = config['datadir']
            path = os.path.join(datadir, pi[1:])
            data = request.getData()
            filename = ''
            ext = tools.what_ext(data['extensions'].keys(),path)
            filename = os.path.normpath('%s.%s' % (path, ext))
            entry = FileEntry(request, filename, datadir )
            data = {}
            data['entry_list'] = [ entry ]
            # the comment arrives as an RSS item on stdin
            commentString = sys.stdin.read()
            if commentString == None:
                sys.exit("<html><body>CommentAPI expects to receive an RSS item on standard input</body></html>")
            try:
                from xml.dom.minidom import parseString
                from xml.parsers.expat import ExpatError
                commentDOM = parseString(commentString)
            except ExpatError, ee:
                sys.exit("<html><body>The RSS Item you supplied could not be parsed.\nThe error occured at line %d, column %d</body></html>" % (ee.lineno,ee.offset))
            def dictFromDOM(dom, data, field, default=''):
                """
                Fill in a field in dict with the content of a element in the dom

                Uses the element's text only when exactly one element of
                that name exists; otherwise stores the default.
                TODO: epydoc
                """
                value = dom.getElementsByTagName(field)
                if len(value) == 1:
                    data[field] = value[0].firstChild.data
                else:
                    data[field] = default
            # use dictFromDOM to fill in a dict with the stuff in the comment
            cdict = {}
            dictFromDOM(commentDOM, cdict, 'title')
            dictFromDOM(commentDOM, cdict, 'author')
            dictFromDOM(commentDOM, cdict, 'link')
            dictFromDOM(commentDOM, cdict, 'source')
            # force an integer data stamp -- not in keeping with RFC 822,
            # but neither is RSS
            import time
            cdict['pubDate'] = str(time.time())
            dictFromDOM(commentDOM, cdict, 'description')
            # must be done after plugin initialization
            from comments import writeComment
            # write the comment (in the dict)
            writeComment(request, config, data, cdict, config['blog_encoding'])
            print "Content-Type: text/plain\n"
            print "OK"
        except OSError:
            # entry file could not be read -- report a generic error
            print "Content-Type: text/plain\n"
            print "An Error Occurred"
def blosxom_process_path_info(args):
    """Process HTTP ``PATH_INFO`` for URI according to path
    specifications, fill in data dict accordingly.

    The paths specification looks like this:

    - ``/foo.html`` and ``/cat/foo.html`` - file foo.* in / and /cat
    - ``/cat`` - category
    - ``/2002`` - category
    - ``/2002`` - year
    - ``/2002/Feb`` and ``/2002/02`` - Year and Month
    - ``/cat/2002/Feb/31`` and ``/cat/2002/02/31``- year and month day
      in category.

    Fills in (at least): ``flavour``, ``pi_yr``/``pi_mo``/``pi_da``,
    ``pi_bl``, ``root_datadir``, ``bl_type``, ``url``, ``path_info``
    and ``truncate`` in the data dict.

    :param args: dict containing the incoming Request object
    """
    request = args['request']
    config = request.get_configuration()
    data = request.get_data()
    pyhttp = request.get_http()
    form = request.getForm()

    # figure out which flavour to use.  the flavour is determined by
    # looking at the "flav" post-data variable, the "flav" query
    # string variable, the "default_flavour" setting in the config.py
    # file, or "html"
    flav = config.get("default_flavour", "html")
    if form.has_key("flav"):
        flav = form["flav"].value
    data['flavour'] = flav

    data['pi_yr'] = ''
    data['pi_mo'] = ''
    data['pi_da'] = ''

    path_info = pyhttp.get("PATH_INFO", "")
    data['root_datadir'] = config['datadir']
    data["pi_bl"] = path_info

    # first we check to see if this is a request for an index and we
    # can pluck the extension (which is certainly a flavour) right
    # off.
    newpath, ext = os.path.splitext(path_info)
    if newpath.endswith("/index") and ext:
        # there is a flavour-like thing, so that's our new flavour and
        # we adjust the path_info to the new filename
        data["flavour"] = ext[1:]
        path_info = newpath

    while path_info and path_info.startswith("/"):
        path_info = path_info[1:]

    absolute_path = os.path.join(config["datadir"], path_info)

    path_info = path_info.split("/")

    if os.path.isdir(absolute_path):
        # this is an absolute path
        data['root_datadir'] = absolute_path
        data['bl_type'] = 'dir'
    elif absolute_path.endswith("/index") and \
            os.path.isdir(absolute_path[:-6]):
        # this is an absolute path with /index at the end of it
        data['root_datadir'] = absolute_path[:-6]
        data['bl_type'] = 'dir'
    else:
        # this is either a file or a date
        ext = tools.what_ext(data["extensions"].keys(), absolute_path)
        if not ext:
            # it's possible we didn't find the file because it's got a
            # flavour thing at the end--so try removing it and
            # checking again.
            newpath, flav = os.path.splitext(absolute_path)
            if flav:
                ext = tools.what_ext(data["extensions"].keys(), newpath)
                if ext:
                    # there is a flavour-like thing, so that's our new
                    # flavour and we adjust the absolute_path and
                    # path_info to the new filename
                    data["flavour"] = flav[1:]
                    absolute_path = newpath
                    path_info, flav = os.path.splitext("/".join(path_info))
                    path_info = path_info.split("/")
        if ext:
            # this is a file
            data["bl_type"] = "file"
            data["root_datadir"] = absolute_path + "." + ext
        else:
            data["bl_type"] = "dir"
            # it's possible to have category/category/year/month/day
            # (or something like that) so we pluck off the categories
            # here.
            pi_bl = ""
            while len(path_info) > 0 and \
                    not (len(path_info[0]) == 4 and path_info[0].isdigit()):
                pi_bl = os.path.join(pi_bl, path_info.pop(0))
            # handle the case where we do in fact have a category
            # preceeding the date.
            if pi_bl:
                pi_bl = pi_bl.replace("\\", "/")
                data["pi_bl"] = pi_bl
                data["root_datadir"] = os.path.join(config["datadir"], pi_bl)
            if len(path_info) > 0:
                item = path_info.pop(0)
                # handle a year token
                if len(item) == 4 and item.isdigit():
                    data['pi_yr'] = item
                    item = ""
                    if (len(path_info) > 0):
                        item = path_info.pop(0)
                        # handle a month token
                        if item in tools.MONTHS:
                            data['pi_mo'] = item
                            item = ""
                            if (len(path_info) > 0):
                                item = path_info.pop(0)
                                # handle a day token
                                if len(item) == 2 and item.isdigit():
                                    data["pi_da"] = item
                                    item = ""
                                    if len(path_info) > 0:
                                        item = path_info.pop(0)
                # if the last item we picked up was "index", then we
                # just ditch it because we don't need it.
                if item == "index":
                    item = ""
                # if we picked off an item we don't recognize and/or
                # there is still stuff in path_info to pluck out, then
                # it's likely this wasn't a date.
                if item or len(path_info) > 0:
                    data["bl_type"] = "dir"
                    data["root_datadir"] = absolute_path

    # construct our final URL; avoid doubled or missing slashes
    # between base_url and pi_bl
    url = config['base_url']
    if data['pi_bl'].startswith("/") and url.endswith("/"):
        url = url[:-1] + data['pi_bl']
    elif data['pi_bl'].startswith("/") or url.endswith("/"):
        url = url + data["pi_bl"]
    else:
        url = url + "/" + data['pi_bl']
    data['url'] = url

    # set path_info to our latest path_info
    data['path_info'] = path_info

    # decide whether entry bodies get truncated for this view
    if data.get("pi_yr"):
        data["truncate"] = config.get("truncate_date", False)
    elif data.get("bl_type") == "dir":
        if data["path_info"] == [''] or data["path_info"] == ['index']:
            data["truncate"] = config.get("truncate_frontpage", True)
        else:
            data["truncate"] = config.get("truncate_category", True)
    else:
        data["truncate"] = False
def cb_handle(args):
    """Handle an incoming trackback ping.

    If ``PATH_INFO`` starts with the configured
    ``trackback_urltrigger`` (default ``/trackback``), this consumes
    the request entirely: it validates the form, runs the
    ``trackback_reject`` callback chain, stores the trackback as a
    comment on the targeted entry, and writes an XML response
    (``tb_good_response`` / ``tb_bad_response``, module-level
    templates defined elsewhere in this file).

    :param args: dict containing the incoming Request object

    :returns: 1 if the request was handled here (stops further
        handling), 0 otherwise.
    """
    request = args["request"]
    pyhttp = request.get_http()
    config = request.get_configuration()

    urltrigger = config.get("trackback_urltrigger", "/trackback")
    logger = tools.get_logger()

    path_info = pyhttp["PATH_INFO"]
    if path_info.startswith(urltrigger):
        response = request.get_response()
        # the trackback spec requires an XML reply body
        response.add_header("Content-type", "text/xml")

        form = request.get_form()

        # default error message, used when the mandatory "url" field
        # is missing from the posted form
        message = (
            "A trackback must have at least a URL field (see "
            "http://www.sixapart.com/pronet/docs/trackback_spec)"
        )

        if "url" in form:
            # imported lazily so the comments plugin is only required
            # when an actual trackback arrives
            from comments import decode_form
            encoding = config.get("blog_encoding", "iso-8859-1")
            decode_form(form, encoding)
            import time
            # build a comment dict in the shape the comments plugin
            # expects; trackbacks are stored as comments of type
            # "trackback"
            cdict = {
                "title": form.getvalue("title", ""),
                "author": form.getvalue("blog_name", ""),
                "pubDate": str(time.time()),
                "link": form["url"].value,
                "source": form.getvalue("blog_name", ""),
                "description": form.getvalue("excerpt", ""),
                "ipaddress": pyhttp.get("REMOTE_ADDR", ""),
                "type": "trackback",
            }
            argdict = {"request": request, "comment": cdict}

            # let plugins veto the trackback; the chain stops at the
            # first callback returning a non-zero value
            reject = tools.run_callback("trackback_reject",
                                        argdict,
                                        donefunc=lambda x: x != 0)

            # callbacks may return either a bare code or a
            # (code, message) pair
            if isinstance(reject, (tuple, list)) and len(reject) == 2:
                reject_code, reject_message = reject
            else:
                reject_code, reject_message = reject, "Trackback rejected."
            if reject_code == 1:
                print >> response, tb_bad_response % reject_message
                return 1

            from Pyblosxom.entries.fileentry import FileEntry
            datadir = config["datadir"]

            from comments import writeComment
            try:
                import os
                # map the trackback URL back onto the entry file path:
                # strip the trigger, then join the remainder (sans
                # leading slash) onto the datadir
                pi = path_info.replace(urltrigger, "")
                path = os.path.join(datadir, pi[1:])
                data = request.get_data()
                ext = tools.what_ext(data["extensions"].keys(), path)
                entry = FileEntry(request, "%s.%s" % (path, ext), datadir)
                data = {}
                data["entry_list"] = [entry]

                # Format Author
                cdict["author"] = "Trackback from %s" % form.getvalue("blog_name", "")
                writeComment(request, config, data, cdict, encoding)
                print >> response, tb_good_response
            # NOTE(review): on Python 2, failed opens inside FileEntry
            # would typically raise IOError, which this OSError clause
            # does not catch -- verify against FileEntry's behavior.
            except OSError:
                message = "URI " + path_info + " doesn't exist"
                logger.error(message)
                print >> response, tb_bad_response % message

        else:
            # missing "url" field: log and return the spec pointer
            logger.error(message)
            print >> response, tb_bad_response % message

        # no further handling is needed
        return 1
    return 0
def blosxom_process_path_info(args):
    """Process HTTP ``PATH_INFO`` for URI according to path
    specifications, fill in data dict accordingly.

    The paths specification looks like this:

    - ``/foo.html`` and ``/cat/foo.html`` - file foo.* in / and /cat
    - ``/cat`` - category
    - ``/2002`` - category
    - ``/2002`` - year
    - ``/2002/Feb`` and ``/2002/02`` - Year and Month
    - ``/cat/2002/Feb/31`` and ``/cat/2002/02/31``- year and month day
      in category.

    Fills in (at least) ``flavour``, ``pi_yr``, ``pi_mo``, ``pi_da``,
    ``pi_bl``, ``root_datadir``, ``bl_type``, ``url``, ``path_info``
    and ``truncate`` in the request's data dict.

    :param args: dict containing the incoming Request object
    """
    request = args['request']
    config = request.get_configuration()
    data = request.get_data()
    py_http = request.get_http()
    form = request.get_form()

    # figure out which flavour to use.  the flavour is determined by
    # looking at the "flav" post-data variable, the "flav" query
    # string variable, the "default_flavour" setting in the config.py
    # file, or "html"
    flav = config.get("default_flavour", "html")
    # "in" instead of the deprecated has_key(); FieldStorage supports
    # __contains__ and has_key() is gone in Python 3
    if "flav" in form:
        flav = form["flav"].value

    data['flavour'] = flav

    data['pi_yr'] = ''
    data['pi_mo'] = ''
    data['pi_da'] = ''

    path_info = py_http.get("PATH_INFO", "")
    data['root_datadir'] = config['datadir']
    data["pi_bl"] = path_info

    # suffix marking an index request; named so the slicing below
    # doesn't rely on the magic number 6
    index_suffix = "/index"

    # first we check to see if this is a request for an index and we
    # can pluck the extension (which is certainly a flavour) right
    # off.
    new_path, ext = os.path.splitext(path_info)
    if new_path.endswith(index_suffix) and ext:
        # there is a flavour-like thing, so that's our new flavour and
        # we adjust the path_info to the new filename
        data["flavour"] = ext[1:]
        path_info = new_path

    # strip all leading slashes so the join below stays inside datadir
    while path_info and path_info.startswith("/"):
        path_info = path_info[1:]

    absolute_path = os.path.join(config["datadir"], path_info)

    path_info = path_info.split("/")

    if os.path.isdir(absolute_path):
        # this is an absolute path
        data['root_datadir'] = absolute_path
        data['bl_type'] = 'dir'

    elif absolute_path.endswith(index_suffix) and \
            os.path.isdir(absolute_path[:-len(index_suffix)]):
        # this is an absolute path with /index at the end of it
        data['root_datadir'] = absolute_path[:-len(index_suffix)]
        data['bl_type'] = 'dir'

    else:
        # this is either a file or a date
        ext = tools.what_ext(data["extensions"].keys(), absolute_path)
        if not ext:
            # it's possible we didn't find the file because it's got a
            # flavour thing at the end--so try removing it and
            # checking again.
            new_path, flav = os.path.splitext(absolute_path)
            if flav:
                ext = tools.what_ext(data["extensions"].keys(), new_path)
                if ext:
                    # there is a flavour-like thing, so that's our new
                    # flavour and we adjust the absolute_path and
                    # path_info to the new filename
                    data["flavour"] = flav[1:]
                    absolute_path = new_path
                    path_info, flav = os.path.splitext("/".join(path_info))
                    path_info = path_info.split("/")

        if ext:
            # this is a file
            data["bl_type"] = "file"
            data["root_datadir"] = absolute_path + "." + ext

        else:
            data["bl_type"] = "dir"

            # it's possible to have category/category/year/month/day
            # (or something like that) so we pluck off the categories
            # here.
            pi_bl = ""
            while len(path_info) > 0 and \
                    not (len(path_info[0]) == 4 and path_info[0].isdigit()):
                pi_bl = os.path.join(pi_bl, path_info.pop(0))

            # handle the case where we do in fact have a category
            # preceding the date.
            if pi_bl:
                pi_bl = pi_bl.replace("\\", "/")
                data["pi_bl"] = pi_bl
                data["root_datadir"] = os.path.join(config["datadir"], pi_bl)

            if len(path_info) > 0:
                item = path_info.pop(0)
                # handle a year token
                if len(item) == 4 and item.isdigit():
                    data['pi_yr'] = item
                    item = ""

                    if len(path_info) > 0:
                        item = path_info.pop(0)
                        # handle a month token
                        if item in tools.MONTHS:
                            data['pi_mo'] = item
                            item = ""

                            if len(path_info) > 0:
                                item = path_info.pop(0)
                                # handle a day token
                                if len(item) == 2 and item.isdigit():
                                    data["pi_da"] = item
                                    item = ""

                                    if len(path_info) > 0:
                                        item = path_info.pop(0)

                # if the last item we picked up was "index", then we
                # just ditch it because we don't need it.
                if item == "index":
                    item = ""

                # if we picked off an item we don't recognize and/or
                # there is still stuff in path_info to pluck out, then
                # it's likely this wasn't a date.
                if item or len(path_info) > 0:
                    data["bl_type"] = "dir"
                    data["root_datadir"] = absolute_path

    # construct our final URL, avoiding doubled or missing slashes
    # between base_url and pi_bl
    url = config['base_url']
    if data['pi_bl'].startswith("/") and url.endswith("/"):
        url = url[:-1] + data['pi_bl']
    elif data['pi_bl'].startswith("/") or url.endswith("/"):
        url = url + data["pi_bl"]
    else:
        url = url + "/" + data['pi_bl']
    data['url'] = url

    # set path_info to our latest path_info
    data['path_info'] = path_info

    if data.get("pi_yr"):
        data["truncate"] = config.get("truncate_date", False)
    elif data.get("bl_type") == "dir":
        if data["path_info"] == [''] or data["path_info"] == ['index']:
            data["truncate"] = config.get("truncate_frontpage", True)
        else:
            data["truncate"] = config.get("truncate_category", True)
    else:
        data["truncate"] = False