def attachment_inlined(self, url, text, **kw):
    """ Try to render an attachment's content inline via the parser that is
    registered for its file extension, then always emit the attachment link.

    @param url: attachment target as given in the wiki markup
    @param text: link text to display
    @keyword format_args: extra arguments handed to the csv parser only
    @rtype: unicode
    @return: markup for inlined content (if any) followed by the attachment link
    """
    from MoinMoin.action import AttachFile
    import os
    _ = self.request.getText
    pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
    fname = wikiutil.taintfilename(filename)
    fpath = AttachFile.getFilename(self.request, pagename, fname)
    ext = os.path.splitext(filename)[1]
    Parser = wikiutil.getParserForExtension(self.request.cfg, ext)
    if Parser is not None:
        try:
            # rU: universal newline support so that even a lone \r is a valid
            # line separator (CSV exported by office apps on Mac uses \r).
            f = open(fpath, 'rU')
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            # Try to decode text. It might return junk, but we don't
            # have enough information with attachments.
            content = wikiutil.decodeUnknownInput(content)
            if '.csv' in getattr(Parser, 'extensions', list()):
                # the csv parser understands extra format arguments (delimiter etc.)
                colorizer = Parser(content, self.request, filename=filename,
                                   format_args=kw.get('format_args', ''))
            else:
                colorizer = Parser(content, self.request, filename=filename)
            colorizer.format(self)
        except IOError:
            # unreadable/vanished attachment: silently fall back to the bare link
            pass
    return (self.attachment_link(1, url) +
            self.text(text) +
            self.attachment_link(0))
def attachment_inlined(self, url, text, **kw):
    """ Try to render an attachment's content inline via the parser that is
    registered for its file extension, then always emit the attachment link.

    @param url: attachment target as given in the wiki markup
    @param text: link text to display
    @rtype: unicode
    @return: markup for inlined content (if any) followed by the attachment link
    """
    from MoinMoin.action import AttachFile
    import os
    _ = self.request.getText
    pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
    fname = wikiutil.taintfilename(filename)
    fpath = AttachFile.getFilename(self.request, pagename, fname)
    ext = os.path.splitext(filename)[1]
    Parser = wikiutil.getParserForExtension(self.request.cfg, ext)
    if Parser is not None:
        try:
            # rU: universal newline support so that even a \r is considered a
            # valid line separator.
            # CSV exported by office (on Mac?) has \r line separators.
            f = open(fpath, 'rU')
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            # Try to decode text. It might return junk, but we don't
            # have enough information with attachments.
            content = wikiutil.decodeUnknownInput(content)
            colorizer = Parser(content, self.request, filename=filename)
            colorizer.format(self)
        except IOError:
            # unreadable/vanished attachment: silently fall back to the bare link
            pass
    return (self.attachment_link(1, url) +
            self.text(text) +
            self.attachment_link(0))
def send_viewfile(pagename, request):
    """ Write HTML that shows the attachment inline if possible.

    Images get an <img> tag, text files are escaped into a <pre> block,
    everything else falls back to a download link.

    @param pagename: name of the page the attachment belongs to
    @param request: current request object (output is written to it)
    """
    _ = request.getText
    filename, fpath = _access_file(pagename, request)
    if not filename:
        return  # access denied or attachment does not exist
    request.write('<h2>' + _("Attachment '%(filename)s'") % {'filename': filename} + '</h2>')
    # renamed from 'type': do not shadow the builtin
    mimetype, enc = mimetypes.guess_type(filename)
    if mimetype:
        if mimetype[:5] == 'image':
            # cache-busting timestamp is only needed for htdocs-served attachments
            timestamp = htdocs_access(request) and "?%s" % time.time() or ''
            request.write('<img src="%s%s" alt="%s">' % (
                getAttachUrl(pagename, filename, request, escaped=1), timestamp,
                wikiutil.escape(filename, 1)))
            return
        elif mimetype[:4] == 'text':
            # TODO: should use formatter here!
            request.write("<pre>")
            # Try to decode file contents. It may return junk, but we
            # don't have enough information on attachments.
            f = open(fpath, 'r')
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            content = wikiutil.decodeUnknownInput(content)
            content = wikiutil.escape(content)
            request.write(content)
            request.write("</pre>")
            return
    request.write('<p>' + _("Unknown file type, cannot display this attachment inline.") + '</p>')
    request.write('<a href="%s">%s</a>' % (
        getAttachUrl(pagename, filename, request, escaped=1),
        wikiutil.escape(filename)))
def attachment_inlined(self, url, text, **kw):
    """ Try to render an attachment's content inline via the parser that is
    registered for its file extension, then always emit the attachment link.

    @param url: attachment target as given in the wiki markup
    @param text: link text to display
    @keyword format_args: extra arguments handed to the csv parser only
    @rtype: unicode
    @return: markup for inlined content (if any) followed by the attachment link
    """
    from MoinMoin.action import AttachFile
    import os
    _ = self.request.getText
    pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
    fname = wikiutil.taintfilename(filename)
    fpath = AttachFile.getFilename(self.request, pagename, fname)
    ext = os.path.splitext(filename)[1]
    Parser = wikiutil.getParserForExtension(self.request.cfg, ext)
    if Parser is not None:
        try:
            f = open(fpath, "r")
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            # Try to decode text. It might return junk, but we don't
            # have enough information with attachments.
            content = wikiutil.decodeUnknownInput(content)
            if ".csv" in getattr(Parser, "extensions", list()):
                # the csv parser understands extra format arguments (delimiter etc.)
                colorizer = Parser(content, self.request, filename=filename,
                                   format_args=kw.get("format_args", ""))
            else:
                colorizer = Parser(content, self.request, filename=filename)
            colorizer.format(self)
        except IOError:
            # unreadable/vanished attachment: silently fall back to the bare link
            pass
    return self.attachment_link(1, url) + self.text(text) + self.attachment_link(0)
def do_action(self):
    """ Load: store the uploaded file's content as a wiki page.

    Reads the uploaded file (optionally renamed via the 'rename' form
    field), decodes it and saves it as page text via PageEditor.
    Sets status True on successful save; msg carries the outcome text.
    """
    status = False
    _ = self._
    form = self.form
    request = self.request
    # form values are lists here (old cgi-style form API); take first element
    comment = form.get('comment', [u''])[0]
    comment = wikiutil.clean_input(comment)
    # browser-supplied original filename of the upload
    filename = form.get('file__filename__')
    rename = form.get('rename', [''])[0].strip()
    if rename:
        target = rename
    else:
        target = filename
    # normalize the filename before using it as a page name
    target = AttachFile.preprocess_filename(target)
    target = wikiutil.clean_input(target)
    if target:
        filecontent = form['file'][0]
        if hasattr(filecontent, 'read'): # a file-like object
            filecontent = filecontent.read() # XXX reads complete file into memory!
        # decoding is guesswork - we don't know the upload's encoding
        filecontent = wikiutil.decodeUnknownInput(filecontent)
        self.pagename = target
        pg = PageEditor(request, self.pagename)
        try:
            msg = pg.saveText(filecontent, 0, comment=comment)
            status = True
        except pg.EditConflict, e:
            msg = e.message
        except pg.SaveError, msg:
            # normalize the error object to a unicode message
            msg = unicode(msg)
def decode_username(self, name):
    """ decode the name we got from the environment var to unicode """
    if not isinstance(name, str):
        # already decoded (or not a byte string at all) - hand it back as-is
        return name
    if self.coding:
        return name.decode(self.coding)
    # XXX we have no idea about REMOTE_USER encoding, please help if
    # you know how to do that cleanly
    return wikiutil.decodeUnknownInput(name)
def send_viewfile(pagename, request):
    """ Write HTML that shows the attachment inline if possible.

    Images get an <img> tag, text files an escaped <pre> block, package
    zips show their install script, plain zips a member listing;
    everything else falls back to a download link.

    @param pagename: name of the page the attachment belongs to
    @param request: current request object (output is written to it)
    """
    _ = request.getText
    filename, fpath = _access_file(pagename, request)
    if not filename:
        return  # access denied or attachment does not exist
    request.write('<h2>' + _("Attachment '%(filename)s'") % {'filename': filename} + '</h2>')
    # renamed from 'type': do not shadow the builtin
    mimetype, enc = mimetypes.guess_type(filename)
    if mimetype:
        if mimetype[:5] == 'image':
            # cache-busting timestamp is only needed for htdocs-served attachments
            timestamp = htdocs_access(request) and "?%s" % time.time() or ''
            request.write('<img src="%s%s" alt="%s">' % (
                getAttachUrl(pagename, filename, request, escaped=1), timestamp,
                wikiutil.escape(filename, 1)))
            return
        elif mimetype[:4] == 'text':
            # TODO: should use formatter here!
            request.write("<pre>")
            # Try to decode file contents. It may return junk, but we
            # don't have enough information on attachments.
            f = open(fpath, 'r')
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            content = wikiutil.decodeUnknownInput(content)
            content = wikiutil.escape(content)
            request.write(content)
            request.write("</pre>")
            return
    package = packages.ZipPackage(request, fpath)
    if package.isPackage():
        request.write("<pre><b>%s</b>\n%s</pre>" % (_("Package script:"), wikiutil.escape(package.getScript())))
        return
    import zipfile
    if zipfile.is_zipfile(fpath):
        zf = zipfile.ZipFile(fpath, mode='r')
        request.write("<pre>%-46s %19s %12s\n" % (_("File Name"), _("Modified")+" "*5, _("Size")))
        for zinfo in zf.filelist:
            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time
            request.write(wikiutil.escape("%-46s %s %12d\n" % (zinfo.filename, date, zinfo.file_size)))
        request.write("</pre>")
        return
    request.write('<p>' + _("Unknown file type, cannot display this attachment inline.") + '</p>')
    request.write('<a href="%s">%s</a>' % (
        getAttachUrl(pagename, filename, request, escaped=1),
        wikiutil.escape(filename)))
def do_action(self):
    """ Load: store the uploaded file's content as a wiki page.

    Checks the TextCha first, then reads the uploaded file (optionally
    renamed via the 'rename' form field), decodes it and saves it as
    page text via PageEditor. Sets status True on successful save; msg
    carries the outcome text.
    """
    status = False
    _ = self._
    form = self.form
    request = self.request
    # Currently we only check TextCha for upload (this is what spammers ususally do),
    # but it could be extended to more/all attachment write access
    if not TextCha(request).check_answer_from_form():
        return status, _('TextCha: Wrong answer! Go back and try again...')
    comment = form.get('comment', u'')
    comment = wikiutil.clean_input(comment)
    file_upload = request.files.get('file')
    if not file_upload:
        # This might happen when trying to upload file names
        # with non-ascii characters on Safari.
        return False, _("No file content. Delete non ASCII characters from the file name and try again.")
    filename = file_upload.filename
    rename = form.get('rename', '').strip()
    if rename:
        target = rename
    else:
        target = filename
    # sanitize before using the name as a page name
    target = wikiutil.clean_input(target)
    if target:
        filecontent = file_upload.stream.read() # XXX reads complete file into memory!
        # decoding is guesswork - we don't know the upload's encoding
        filecontent = wikiutil.decodeUnknownInput(filecontent)
        self.pagename = target
        pg = PageEditor(request, self.pagename)
        try:
            msg = pg.saveText(filecontent, 0, comment=comment)
            status = True
        except pg.EditConflict, e:
            msg = e.message
        except pg.SaveError, msg:
            # normalize the error object to a unicode message
            msg = unicode(msg)
def attachment_inlined(self, url, text, **kw):
    """ Try to render an attachment's content inline via the parser that is
    registered for its file extension, then emit the attachment link.

    @param url: attachment target as given in the wiki markup
    @param text: link text to display
    @rtype: unicode
    @return: markup for inlined content (if any) followed by the attachment link
    """
    from MoinMoin.action import AttachFile
    import os
    _ = self.request.getText
    pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
    fname = wikiutil.taintfilename(filename)
    fpath = AttachFile.getFilename(self.request, pagename, fname)
    base, ext = os.path.splitext(filename)
    Parser = wikiutil.getParserForExtension(self.request.cfg, ext)
    if Parser is not None:
        try:
            f = open(fpath, 'r')
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            # Try to decode text. It might return junk, but we don't
            # have enough information with attachments.
            content = wikiutil.decodeUnknownInput(content)
            colorizer = Parser(content, self.request)
            colorizer.format(self)
        except IOError:
            # unreadable/vanished attachment: silently fall back to the bare link
            pass
    return self.attachment_link(url, text)
def attachment(self, url_and_text, **kw):
    """ This gets called on attachment URLs.

    Handles three schemes distinguished by the first character of the url
    ('i'nline, 'd'rawing, else plain attachment). Nonexistent targets
    render an upload/create link; pictures render an <img> tag (drawings
    possibly with an image map); inline attachments are run through the
    parser registered for their extension; everything else becomes a link.

    @param url_and_text: 1- or 2-tuple of (url,) or (url, linktext)
    @keyword pretty_url: suppress inline image rendering when set
    @rtype: unicode
    @return: markup for the attachment
    """
    import urllib
    _ = self._
    if len(url_and_text) == 1:
        url = url_and_text[0]
        text = None
    else:
        url, text = url_and_text
    # scheme is encoded in the first character: inline:... / drawing:...
    inline = url[0] == 'i'
    drawing = url[0] == 'd'
    url = url.split(":", 1)[1]
    url = urllib.unquote(url)
    text = text or url
    pagename = self.formatter.page.page_name
    parts = url.split('/')
    if len(parts) > 1:
        # get attachment from other page
        pagename = '/'.join(parts[:-1])
        url = parts[-1]
    from MoinMoin.action import AttachFile
    fname = wikiutil.taintfilename(url)
    if drawing:
        drawing = fname
        fname = fname + ".png"
        url = url + ".png"
        # fallback for old gif drawings (1.1 -> 1.2)
        fpath = AttachFile.getFilename(self.request, pagename, fname)
        if not os.path.exists(fpath):
            gfname = fname[:-4] + ".gif"
            gurl = url[:-4] + ".gif"
            gfpath = AttachFile.getFilename(self.request, pagename, gfname)
            if os.path.exists(gfpath):
                fname, url, fpath = gfname, gurl, gfpath
    else:
        fpath = AttachFile.getFilename(self.request, pagename, fname)
    # check whether attachment exists, possibly point to upload form
    if not os.path.exists(fpath):
        if drawing:
            linktext = _('Create new drawing "%(filename)s"')
        else:
            linktext = _('Upload new attachment "%(filename)s"')
        return wikiutil.link_tag(self.request,
            self.formatter.text('%s?action=AttachFile&rename=%s%s' % (
                wikiutil.quoteWikinameURL(pagename),
                urllib.quote_plus(fname.encode(config.charset)),
                drawing and ('&drawing=%s' % urllib.quote(drawing.encode(config.charset))) or '')),
            linktext % {'filename': self.formatter.text(fname)})
    # check for image URL, and possibly return IMG tag
    # (images are always inlined, just like for other URLs)
    if not kw.get('pretty_url', 0) and wikiutil.isPicture(url):
        if drawing:
            # check for map file
            mappath = AttachFile.getFilename(self.request, pagename, drawing + '.map')
            edit_link = self.formatter.text('%s?action=AttachFile&rename=%s&drawing=%s' % (
                wikiutil.quoteWikinameURL(pagename),
                urllib.quote_plus(fname.encode(config.charset)),
                urllib.quote(drawing.encode(config.charset))))
            if os.path.exists(mappath):
                # we have a image map. inline it and add a map ref
                # to the img tag
                try:
                    mapfile = open(mappath, 'r')
                    try:
                        map = mapfile.read()
                    finally:
                        mapfile.close()  # the handle was previously leaked
                except (IOError, OSError):
                    # unreadable map: fall through to plain rendering below
                    pass
                else:
                    mapid = 'ImageMapOf' + drawing
                    # replace MAPNAME
                    map = map.replace('%MAPNAME%', mapid)
                    # add alt and title tags to areas
                    map = re.sub('href\s*=\s*"((?!%TWIKIDRAW%).+?)"', r'href="\1" alt="\1" title="\1"', map)
                    # add in edit links plus alt and title attributes
                    map = map.replace('%TWIKIDRAW%"', edit_link + '" alt="' + _('Edit drawing %(filename)s') % {'filename': self.formatter.text(fname)} + '" title="' + _('Edit drawing %(filename)s') % {'filename': self.formatter.text(fname)} + '"')
                    # unxml, because 4.01 concrete will not validate />
                    map = map.replace('/>', '>')
                    return map + self.formatter.image(alt=drawing,
                        src=AttachFile.getAttachUrl(pagename, url, self.request, addts=1),
                        usemap='#' + mapid, html_class="drawing")
            else:
                return wikiutil.link_tag(self.request, edit_link,
                    self.formatter.image(alt=url,
                        src=AttachFile.getAttachUrl(pagename, url, self.request, addts=1),
                        html_class="drawing"),
                    attrs='title="%s"' % (_('Edit drawing %(filename)s') % {'filename': self.formatter.text(fname)}))
        else:
            return self.formatter.image(alt=url,
                src=AttachFile.getAttachUrl(pagename, url, self.request, addts=1))
    # try to inline the attachment (parser know what they
    # can handle)
    base, ext = os.path.splitext(url)
    if inline:
        Parser = wikiutil.getParserForExtension(self.cfg, ext)
        if Parser is not None:
            f = open(fpath, 'r')
            try:
                content = f.read()
            finally:
                f.close()  # the handle was previously leaked
            # Try to decode text. It might return junk, but we don't
            # have enough information with attachments.
            content = wikiutil.decodeUnknownInput(content)
            colorizer = Parser(content, self.request)
            colorizer.format(self.formatter)
    url = AttachFile.getAttachUrl(pagename, url, self.request)
    if not kw.get('pretty_url', 0) and wikiutil.isPicture(url):
        return self.formatter.image(src=url)
    else:
        return (self.formatter.url(1, url) +
                self.formatter.text(text) +
                self.formatter.url(0))
class PluginScript(MoinScript):
    """Purpose:
    ========
    This script imports the wiki page from given file into the wiki.

    Detailed Instructions:
    ======================
    General syntax: moin [options] import wikipage [wikipage-options]

    [options] usually should be:
        --config-dir=/path/to/cfg --wiki-url=http://wiki.example.org/ --page=Page
    """
    def __init__(self, argv, def_values):
        # register the command-line options specific to this import script
        MoinScript.__init__(self, argv, def_values)
        self.parser.add_option('--acl', dest='acl', default='', metavar='ACL',
            help='Set a specific ACL for the wiki page')
        self.parser.add_option('--author', dest='author', metavar='AUTHOR',
            default='PageImporter',
            help='Use AUTHOR for edit history / RecentChanges')
        self.parser.add_option('--comment', dest='comment', metavar='COMMENT',
            default='', help='COMMENT for edit history / RecentChanges')
        self.parser.add_option('--file', dest='file', default='', metavar='FILE',
            help='Read the wiki page from the given file')
        self.parser.add_option('--no-backup', dest='revision_backup', default=True,
            action='store_false',
            help="Suppress making a page backup per revision")
        self._update_option_help('--page', 'Name of the wiki page which should be imported')

    def mainloop(self):
        """ Read the given file and store it as a new revision of the
        given page, bypassing ACL checks via an all-allowing user. """
        self.init_request()
        request = self.request
        # IAmRoot grants every permission so the import cannot be ACL-blocked
        request.user.may = IAmRoot()
        if not self.options.page:
            fatal('You must specify a wiki page name (--page=Page)!')
        if not self.options.file:
            fatal('You must specify a FILE to read from (--file=FILE)!')
        try:
            fileObj = open(self.options.file, 'rb')
        except IOError, err:
            fatal(str(err))
        # decoding is guesswork - the file's encoding is unknown
        page_content = decodeUnknownInput(fileObj.read()).rstrip()
        fileObj.close()
        if not self.options.acl:
            acl = ''
        else:
            # prepend an ACL processing instruction line to the page text
            acl = '#acl %s\n' % self.options.acl
        comment = clean_input(self.options.comment)
        pe = PageEditor(request, self.options.page, do_editor_backup=0,
                        uid_override=self.options.author,
                        do_revision_backup=int(self.options.revision_backup))
        try:
            pe.saveText(acl + page_content, 0, comment=comment)
        except PageEditor.Unchanged:
            log("info: wikipage was not modified - ignored update.")
        except PageEditor.SaveError, err:
            log("error: %r" % err)
def send_viewfile(pagename, request):
    """ Write HTML that shows the attachment inline if possible.

    Emits a download link first, then tries (in order): drawing renderer,
    inline <img>, extension parser / escaped <pre> for text, package
    script display, zip member listing, and finally the EmbedObject macro.

    @param pagename: name of the page the attachment belongs to
    @param request: current request object (output is written to it)
    """
    _ = request.getText
    fmt = request.html_formatter
    pagename, filename, fpath = _access_file(pagename, request)
    if not filename:
        return  # access denied or attachment does not exist
    request.write('<h2>' + _("Attachment '%(filename)s'") % {'filename': filename} + '</h2>')
    # show a download link above the content
    label = _('Download')
    link = (fmt.url(1, getAttachUrl(pagename, filename, request, do='get'), css_class="download") +
            fmt.text(label) +
            fmt.url(0))
    request.write('%s<br><br>' % link)
    if filename.endswith('.tdraw') or filename.endswith('.adraw'):
        request.write(fmt.attachment_drawing(filename, ''))
        return
    mt = wikiutil.MimeType(filename=filename)
    # destinguishs if browser need a plugin in place
    if mt.major == 'image' and mt.minor in config.browser_supported_images:
        url = getAttachUrl(pagename, filename, request)
        request.write('<img src="%s" alt="%s">' % (
            wikiutil.escape(url, 1), wikiutil.escape(filename, 1)))
        return
    elif mt.major == 'text':
        ext = os.path.splitext(filename)[1]
        Parser = wikiutil.getParserForExtension(request.cfg, ext)
        if Parser is not None:
            try:
                f = open(fpath, 'r')
                try:
                    content = f.read()
                finally:
                    f.close()  # the handle was previously leaked
                content = wikiutil.decodeUnknownInput(content)
                colorizer = Parser(content, request, filename=filename)
                colorizer.format(request.formatter)
                return
            except IOError:
                pass
        request.write(request.formatter.preformatted(1))
        # If we have text but no colorizing parser we try to decode file contents.
        f = open(fpath, 'r')
        try:
            content = f.read()
        finally:
            f.close()  # the handle was previously leaked
        content = wikiutil.decodeUnknownInput(content)
        content = wikiutil.escape(content)
        request.write(request.formatter.text(content))
        request.write(request.formatter.preformatted(0))
        return
    try:
        package = packages.ZipPackage(request, fpath)
        if package.isPackage():
            request.write(
                "<pre><b>%s</b>\n%s</pre>" % (_("Package script:"), wikiutil.escape(package.getScript())))
            return
        if zipfile.is_zipfile(fpath) and mt.minor == 'zip':
            zf = zipfile.ZipFile(fpath, mode='r')
            request.write("<pre>%-46s %19s %12s\n" % (_("File Name"), _("Modified") + " " * 5, _("Size")))
            for zinfo in zf.filelist:
                date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time
                request.write(
                    wikiutil.escape("%-46s %s %12d\n" % (zinfo.filename, date, zinfo.file_size)))
            request.write("</pre>")
            return
    except (RuntimeError, zipfile.BadZipfile, zipfile.LargeZipFile):
        # We don't want to crash with a traceback here (an exception
        # here could be caused by an uploaded defective zip file - and
        # if we crash here, the user does not get a UI to remove the
        # defective zip file again).
        # RuntimeError is raised by zipfile stdlib module in case of
        # problems (like inconsistent slash and backslash usage in the
        # archive).
        # BadZipfile/LargeZipFile are raised when there are some
        # specific problems with the archive file.
        logging.exception(
            "An exception within zip file attachment handling occurred:")
        return
    from MoinMoin import macro
    from MoinMoin.parser.text import Parser
    macro.request = request
    macro.formatter = request.html_formatter
    p = Parser("##\n", request)
    m = macro.Macro(p)
    # use EmbedObject to view valid mime types
    if mt is None:
        request.write(
            '<p>' + _("Unknown file type, cannot display this attachment inline.") + '</p>')
        link = (fmt.url(1, getAttachUrl(pagename, filename, request)) +
                fmt.text(filename) +
                fmt.url(0))
        request.write('For using an external program follow this link %s' % link)
        return
    request.write(
        m.execute('EmbedObject', u'target="%s", pagename="%s"' % (filename, pagename)))
    return
def send_viewfile(pagename, request):
    """ Write HTML that shows the attachment inline if possible.

    Emits a download link first, then tries (in order): drawing renderer,
    inline <img>, extension parser / escaped <pre> for text, package
    script display, zip member listing, and finally the EmbedObject macro.

    @param pagename: name of the page the attachment belongs to
    @param request: current request object (output is written to it)
    """
    _ = request.getText
    fmt = request.html_formatter
    pagename, filename, fpath = _access_file(pagename, request)
    if not filename:
        return  # access denied or attachment does not exist
    request.write('<h2>' + _("Attachment '%(filename)s'") % {'filename': filename} + '</h2>')
    # show a download link above the content
    label = _('Download')
    link = (fmt.url(1, getAttachUrl(pagename, filename, request, do='get'), css_class="download") +
            fmt.text(label) +
            fmt.url(0))
    request.write('%s<br><br>' % link)
    if filename.endswith('.tdraw') or filename.endswith('.adraw'):
        request.write(fmt.attachment_drawing(filename, ''))
        return
    mt = wikiutil.MimeType(filename=filename)
    # destinguishs if browser need a plugin in place
    if mt.major == 'image' and mt.minor in config.browser_supported_images:
        url = getAttachUrl(pagename, filename, request)
        request.write('<img src="%s" alt="%s">' % (
            wikiutil.escape(url, 1), wikiutil.escape(filename, 1)))
        return
    elif mt.major == 'text':
        ext = os.path.splitext(filename)[1]
        Parser = wikiutil.getParserForExtension(request.cfg, ext)
        if Parser is not None:
            try:
                f = open(fpath, 'r')
                try:
                    content = f.read()
                finally:
                    f.close()  # the handle was previously leaked
                content = wikiutil.decodeUnknownInput(content)
                colorizer = Parser(content, request, filename=filename)
                colorizer.format(request.formatter)
                return
            except IOError:
                pass
        request.write(request.formatter.preformatted(1))
        # If we have text but no colorizing parser we try to decode file contents.
        f = open(fpath, 'r')
        try:
            content = f.read()
        finally:
            f.close()  # the handle was previously leaked
        content = wikiutil.decodeUnknownInput(content)
        content = wikiutil.escape(content)
        request.write(request.formatter.text(content))
        request.write(request.formatter.preformatted(0))
        return
    try:
        package = packages.ZipPackage(request, fpath)
        if package.isPackage():
            request.write("<pre><b>%s</b>\n%s</pre>" % (_("Package script:"), wikiutil.escape(package.getScript())))
            return
        if zipfile.is_zipfile(fpath) and mt.minor == 'zip':
            zf = zipfile.ZipFile(fpath, mode='r')
            request.write("<pre>%-46s %19s %12s\n" % (_("File Name"), _("Modified")+" "*5, _("Size")))
            for zinfo in zf.filelist:
                date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time
                request.write(wikiutil.escape("%-46s %s %12d\n" % (zinfo.filename, date, zinfo.file_size)))
            request.write("</pre>")
            return
    except (RuntimeError, zipfile.BadZipfile, zipfile.LargeZipFile):
        # We don't want to crash with a traceback here (an exception
        # here could be caused by an uploaded defective zip file - and
        # if we crash here, the user does not get a UI to remove the
        # defective zip file again).
        # RuntimeError is raised by zipfile stdlib module in case of
        # problems (like inconsistent slash and backslash usage in the
        # archive).
        # BadZipfile/LargeZipFile are raised when there are some
        # specific problems with the archive file.
        logging.exception("An exception within zip file attachment handling occurred:")
        return
    from MoinMoin import macro
    from MoinMoin.parser.text import Parser
    macro.request = request
    macro.formatter = request.html_formatter
    p = Parser("##\n", request)
    m = macro.Macro(p)
    # use EmbedObject to view valid mime types
    if mt is None:
        request.write('<p>' + _("Unknown file type, cannot display this attachment inline.") + '</p>')
        link = (fmt.url(1, getAttachUrl(pagename, filename, request)) +
                fmt.text(filename) +
                fmt.url(0))
        request.write('For using an external program follow this link %s' % link)
        return
    request.write(m.execute('EmbedObject', u'target="%s", pagename="%s"' % (filename, pagename)))
    return
def headscript(self, d):
    """ Return html head script with common functions

    Changed: Added GoogleAnalytics support and support for customizable accesskeys

    @param d: parameter dictionary
    @rtype: unicode
    @return: script for html head
    """
    # Don't add script for print view
    # Try..except for backwards compatibility of Moin versions only
    try:
        if self.request.action == 'print':
            return u''
    except:
        if self.request.form.get('action', [''])[0] == 'print':
            return u''
    # Searchbox stuff
    _ = self.request.getText
    script = u"""
<script type="text/javascript">
<!--
var search_hint = "%(search_hint)s";
//-->
</script>
""" % {
        'search_hint': _('Search', formatted=False),
        }
    # GoogleAnalytics stuff
    if hasattr(self.request.cfg, 'google_analytics_account_number'):
        account_number = self.request.cfg.google_analytics_account_number
        script += u"""
<script src="http://www.google-analytics.com/urchin.js" type="text/javascript"></script>
<script type="text/javascript">
<!--
_uacct = "%(account_number)s";
urchinTracker();
//-->
</script>""" % {
            'account_number': account_number,
            }
    # Accesskey customization
    user = self.request.user
    content = ''
    if user.valid and user.name:
        homewiki, homepage = wikiutil.getInterwikiHomePage(self.request)
        # We don't support interwiki homepages at the moment.
        # In the long run better move accesskey customization to the userprefs menu instead
        # of using an attached file 'shortcuts.js" to the user's homepage.
        # This will solve most security, perfomance and interwiki homepage problems
        if homewiki == 'Self':
            from MoinMoin.action import AttachFile
            import os
            pagename, filename = AttachFile.absoluteName('shortcuts.js', homepage)
            fname = wikiutil.taintfilename(filename)
            fpath = AttachFile.getFilename(self.request, pagename, fname)
            base, ext = os.path.splitext(filename)
            try:
                # Try to get the user's shortcut list
                f = open(fpath, 'r')
                try:
                    content = f.read()
                finally:
                    f.close()  # the handle was previously leaked
                content = wikiutil.decodeUnknownInput(content)
                # Escape malicious code
                ## Turned off due to i18n problems with regex checking
                ## paras = []
                ## try:
                ##     paras = content.split(',')
                ## except:
                ##     paras[0] = content
                ## import re
                ## pattern1 = re.compile('^"(name|id)\#[-_a-zA-Z0-9]+\=[a-zA-Z0-9]+"$')
                ## pattern2 = re.compile('^"(name|id)\#[-_a-zA-Z0-9]+\=[a-zA-Z0-9]+\![ -_a-zA-Z0-9]+"$')
                ## for para in paras:
                ##     fail1 = fail2 = False
                ##     if re.search(pattern1, para.strip()) == None:
                ##         fail1 = True
                ##     if re.search(pattern2, para.strip()) == None:
                ##         fail2 = True
                ##     if fail1 and fail2:
                ##         content = ''
                ##         break
                content = content.replace(')', '')
                content = content.replace(';', '')
            except:
                # User hasn't specified a shortcut list
                pass
    if not content:
        # If there is no user shortcut list: Do we have some global shortcut lists
        # set in wikiconfig.py?
        lang_keydefaults = 'accesskey_defaults_%s' % self.request.lang
        # Check whether there is a shortcut list fitting to the request.lang object
        if hasattr(self.request.cfg, lang_keydefaults):
            content = getattr(self.request.cfg, lang_keydefaults)
        # Otherwise check if 'accesskey_defaults' is set
        elif hasattr(self.request.cfg, 'accesskey_defaults'):
            content = self.request.cfg.accesskey_defaults
    script += u"""
<script type="text/javascript">
<!--
var shortcut_list = new Array (%(shortcut_list)s);
//-->
</script>
""" % {
        'shortcut_list': content, }
    return script
def format(self, formatter):
    """ Run the raw bibtex text through bibtex2html and write the
    resulting HTML (with attachment links and hyperlink-db names
    substituted) to the output. """
    self.formatter = formatter
    text = self.raw
    output = ''
    # assemble bibtex2html command line switches from parser options
    settings = '-s ' + self.style
    if not self.abstract:
        settings = settings + ' -noabstract'
    if not self.keywords:
        settings = settings + ' -nokeywords'
    if self.usekeys:
        settings = settings + ' -use-keys'
    try:
        # point bibtex at the configured .bst style file directory
        os.environ["BSTINPUTS"] = self.bstfiles
    except AttributeError:
        # bstfiles not configured - rely on the default search path
        pass
    try:
        # NOTE(review): 'param' is not defined anywhere in this block's
        # visible scope - looks like it should come from configuration or
        # an attribute; confirm against the full module.
        pin, pout, perr = os.popen3('%s -c %s %s %s ' % (self.bibtex2html, self.bibtex, param, settings))
        pin.write(text)
        pin.flush()
        pin.close()
        output = '\n'.join(pout.readlines())
        pout.close()
        perr.close()
    except AttributeError:
        # the bibtex/bibtex2html paths are missing from the wiki config
        output = '''The locations of bibtex2html and bibtex have not been configured.
Please add the following lines in wiki configuration:
bibtex_bibtex = '/path/to/bibtex'
bibtex_bibtex2html = '/path/to/bibtex2html'
Sometime it is also necessary to specify where the BibTeX style files (.bst) are located.
Then it is necessary to add a line in wiki configuration:
bstfiles = '/path/to/bstfiles'
(Also see HelpOnConfiguration).'''
        self.out.write(formatter.preformatted(1) + output + formatter.preformatted(0))
        return
    except IOError:
        # bibtex2html failed - show its stderr output instead
        output = '\n'.join(perr.readlines())
        pin.close()
        pout.close()
        perr.close()
        self.out.write(formatter.preformatted(1) + output + formatter.preformatted(0))
        return
    output = wikiutil.decodeUnknownInput(output)
    # attachments: rewrite <a href="..."> links to generated files
    # (re.X: whitespace inside the pattern is ignored)
    p = re.compile("""(?P<pre>\<a\s+href=")
(?P<url>.*?)
(?P<suf>"\>(\.dvi|\.ps|\.pdf|\.rtf|\.txt|\.html)(\.gz|\.Z|\.zip)?\</a\>)""", re.I|re.X)
    output = p.sub(self.wikirepl, output)
    # process the hyperlinks db: map known names in the output to links
    page = Page(self.request, self.hlinsdb)
    quotes = wikiutil.decodeUnknownInput(page.get_raw_body())
    if not self.request.user.may.read(self.hlinsdb):
        # reader may not see the hyperlinks db - skip substitution entirely
        quotes = ''
    if quotes != '' and output != '':
        for line in quotes.splitlines():
            name, link = '', ''
            # accepted db line formats: " * [[link|name]]", " * name=link",
            # and definition-list style " name:: link"
            if line.startswith(' *'):
                line = line[2:].strip()
                if line.startswith('[[') and line.endswith(']]') and line.find('|') > -1:
                    link, name = line[2:-2].split('|', 1)
                elif line.find('=') > -1:
                    name, link = line.split('=', 1)
            elif line.startswith(' ') and line.find(':: ') > -1:
                name, link = line.split(':: ', 1)
            name, link = name.strip(), link.strip()
            if name == '' or link == '':
                continue
            url = formatter.url(1, link) + ' ' + name + formatter.url(0)
            # no html markup tags allowed
            # NOTE(review): these two replaces are no-ops as written - they
            # look like lost HTML entity escaping (e.g. '&lt;'/'&gt;');
            # confirm against the original module source.
            name = name.replace('<', '<').replace('>', '>')
            name = re.escape(name)
            # to fit white spaces
            name = name.replace('\\ ', '(( )|\s)+')
            output = re.compile(name).sub(url, output)
    # To please Springer-Verlag.
    # NOTE(review): prefix looks like it was '&copy;' originally - verify.
    output = output.replace('Springer-Verlag', '©Springer-Verlag')
    self.out.write(formatter.rawHTML(output))