def redirect_to_name(self, itemname):
    "Redirect to an item, by name."
    if itemname == 'DefaultTiddlers':
        fsname = makeFSname('index') + '.html'
    else:
        fsname = makeFSname(itemname) + '.html'

    raise HTTPRedirect(fsname)
def item_post_conv(wiki, item, backups):
    from wikklytext.store import makeFSname

    if wiki.get_kind() == 'text':
        # move original .txt file to backups
        txt = os.path.join(wiki.get_path(), makeFSname(item.name) + '.txt')
        shutil.copy(txt, backups)
        os.unlink(txt)
    elif wiki.get_kind() in ['tiddlywiki', 'sqlite']:
        # nothing to clean up for items
        pass
    else:
        raise Exception("Don't know what to do with %s" % wiki.get_kind())
def itemname_from_fsname(self, fsname):
    """
    Given a fsname (from makeFSname, i.e. no extension), return the
    corresponding item name, or None if not found.
    """
    # check special names
    if fsname in ['index', 'index-Names', 'index-Tags', 'index-Timeline']:
        return fsname

    for item in self.store().getall():
        if makeFSname(item.name) == fsname:
            return item.name

    return None
def conv_check_ok(wiki, newkind, newpath):
    from wikklytext.store import makeFSname

    if newkind in ['tiddlywiki', 'sqlite']:
        if os.path.exists(newpath):
            print "'%s' exists - not overwriting" % newpath
            return False
    elif newkind == 'text':
        for item in wiki.getall():
            txt = os.path.join(wiki.get_path(), makeFSname(item.name) + '.txt')
            if os.path.exists(txt):
                print "'%s' would be overwritten by conversion -- exiting now." % txt
                return False
    else:
        raise Exception("Unknown conversion")

    return True
def wik_clean(wikipath):
    wiki = wik_open(wikipath)
    if not wiki.initted():
        print "** Error - no wiki found here."
        return

    for item in wiki.store().getall():
        fname = os.path.join(wiki.get_path(), makeFSname(item.name) + '.html')
        if os.path.isfile(fname):
            print "Remove %s" % fname
            os.unlink(fname)

    for name in ['index.html', 'index-Timeline.html', 'index-Names.html', 'index-Tags.html']:
        if os.path.isfile(name):
            print "Remove %s" % name
            os.unlink(name)

    # clear cache as well
    print "Clearing cache ..."
    wiki.rendercache().clear_all()
def renderable_items(self, logfunc=nopfunc):
    """
    Return a list of all renderable items in the store.

    Returns list of: (item, html_name)
    """
    items = []
    skipset = set_(self.get_skiplist())
    regen_all = False

    # much faster to use getall() for some store types
    for item in self.store().getall():
        if item.name in skipset:
            # ignore item
            continue

        if item.name in self.reserved_names():
            print "*** WARNING - Name '%s' is reserved by WikklyText. Will not render." % item.name
            continue

        # make HTML filename
        if item.name == 'DefaultTiddlers':
            fsname = self.fpath('index.html')
        else:
            fsname = self.fpath(makeFSname(item.name) + '.html')

        items.append((item, fsname))

    # if DefaultTiddlers does not exist, make empty one
    allnames = [i[0].name for i in items]
    if 'DefaultTiddlers' not in allnames:
        d = WikklyItem('DefaultTiddlers')
        items.append((d, self.fpath('index.html')))

    return items
def render(self, namelist=None, uid=None, logfunc=nopfunc, oneitem=None):
    """
    namelist or oneitem can be used to only render certain items:
        * If namelist is not None, only those names will be rendered.
          namelist can include special names like 'index', 'index-Timeline',
          etc., to render the indexes.
        * If oneitem is not None, it must be a WikklyItem to be rendered.
        * Only (at most) one of namelist and oneitem can be given.

    uid, if given, is the logged-in UID (for customizing display).
    None means no logged-in user.

    All items are rendered to memory and returned as a map of:
        map[item_name] = (fsname, rendered_html)

    Where 'fsname' is the full path name (if the caller wants to save the
    rendered file) and 'rendered_html' is the HTML, ready to be served.
    """
    from wikklytext.wiki.layout import layoutPage, layoutTimelinePage, \
            layoutNameIndexPage, layoutTagIndexPage

    # API check
    assert(namelist is None or oneitem is None)  # cannot BOTH be given

    rendered = {}

    # turn 'index' into 'DefaultTiddlers'
    if namelist is not None:
        try:
            i = namelist.index('index')
            namelist[i] = 'DefaultTiddlers'
            replaced_index = True
        except ValueError:
            replaced_index = False
    else:
        replaced_index = False

    # make list of (item, fsname) to render
    if oneitem is None:
        # if caller provided a namelist, only render those names
        allitems = [(item, fsname) for item, fsname in self.renderable_items(logfunc)
                    if namelist is None or item.name in namelist]
    else:
        # only render 'oneitem'
        allitems = [(oneitem, self.fpath(makeFSname(oneitem.name) + '.html'))]

    logfunc("Info: ")
    logfunc(self.store().info())

    logfunc("Rendering ...")

    for item, fsname in allitems:
        logfunc("   Rendering %s ..." % os.path.basename(fsname))
        html = layoutPage(self, item, uid)

        # turn 'DefaultTiddlers' into 'index' as needed
        if item.name == 'DefaultTiddlers' and replaced_index:
            # caller asked for 'index', not 'DefaultTiddlers'
            rendered['index'] = (fsname, html)
        else:
            rendered[item.name] = (fsname, html)

    # NOTE: Don't change the names of the index-* files without
    # changing the URLResolver in wiki/render.py also.

    # write timeline index
    if namelist is None or 'index-Timeline' in namelist:
        logfunc("   Writing timeline index")
        html = layoutTimelinePage(self, uid)
        rendered['index-Timeline'] = (self.fpath('index-Timeline.html'), html)

    # write name index
    if namelist is None or 'index-Names' in namelist:
        logfunc("   Writing name index")
        html = layoutNameIndexPage(self, uid)
        rendered['index-Names'] = (self.fpath('index-Names.html'), html)

    # write tag index
    if namelist is None or 'index-Tags' in namelist:
        logfunc("   Writing tag index")
        html = layoutTagIndexPage(self, uid)
        rendered['index-Tags'] = (self.fpath('index-Tags.html'), html)

    return rendered
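# Usage sketch (not part of the original source): one way a caller might persist
# the in-memory map returned by render() as static .html files. Assumes 'wiki' is
# an initialized WikklyWiki and that each returned fsname is a writable full path;
# the rendered HTML is assumed here to already be an encoded byte string.
def write_rendered_to_disk(wiki):
    rendered = wiki.render()  # render all items; no logged-in user
    for name, (fsname, html) in rendered.items():
        f = open(fsname, 'wb')
        try:
            f.write(html)
        finally:
            f.close()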
def default(self, *args, **kwargs):
    """
    ALL requests go through 'default' (even .index redirects here).
    This simplifies logon and logging.

    Before adding any more methods with .exposed=True, try adding
    them here instead.
    """
    from wikklytext.store import makeFSname
    from urllib import urlencode
    from copy import copy
    import wsgiref.headers

    # use wsgiref.headers to get case-insensitive behavior
    wsgi = wsgiref.headers.Headers(self.build_wsgienv().items())

    # pass request information to wiki
    baseurl = cherrypy.request.base + cherrypy.request.script_name
    self.wiki.setRT_baseurl(baseurl)
    self.wiki.setRT_wsgienv(wsgi)
    self.wiki.setRT_curUID(cherrypy.session.get('UID', None))

    # check requests for css/* files (always allow these requests so basic error
    # pages can do styling)
    if len(args) == 2 and args[0] == 'css':
        metadata.log_staticfile(self.metadb, args[1], cherrypy.session.get('UID', None), args, kwargs)
        return self.serve_css_element(args[1])

    # check error conditions first
    self.ensure_metadb()
    if self.metadb == metadata.NeedsUpdate or metadata.wiki_needs_update(self.wiki):
        updater = metadata.MetaUpdater(self.wiki)
        updater.do_updates()
        return layoutWikiWasUpdated(self.wiki)

    # /favicon.png (lives in css/, but links in <HEAD> point to /favicon.png)
    if len(args) == 1 and args[0] == 'favicon.png':
        metadata.log_staticfile(self.metadb, args[0], cherrypy.session.get('UID', None), args, kwargs)
        return self.serve_css_element(args[0])

    # some browsers ask for favicon.ico, ignoring what is given in <HEAD>
    if len(args) == 1 and args[0] == 'favicon.ico':
        # don't log since this is a known condition
        raise HTTPError(status=404,
                message='Browser bug: Your browser is asking for favicon.ico even though favicon.png is specified in the document HEAD.')

    # /rss.xml
    if len(args) == 1 and args[0] == 'rss.xml':
        from wikklytext.wiki.rss import create_rss_xml
        metadata.log_feed(self.metadb, args[0], cherrypy.session.get('UID', None), args, kwargs)
        url_site = cherrypy.request.base + cherrypy.request.script_name
        xml = create_rss_xml(self.wiki, url_site)
        cherrypy.response.headers['Content-Type'] = 'text/xml'
        return xml

    # /robots.txt
    if len(args) == 1 and args[0] == 'robots.txt':
        metadata.log_staticfile(self.metadb, args[0], cherrypy.session.get('UID', None), args, kwargs)
        # for now, allow robots to access everything
        robots = """
User-agent: *
Disallow:
"""
        cherrypy.response.headers['Content-Type'] = 'text/plain'
        return robots

    # serve /api calls
    if len(args) and args[0] == 'api':
        from wsgifront import CherryPyWrapper, shift_path_info
        from wikklytext.wiki.web import rootapp

        wrapped = CherryPyWrapper(rootapp)
        env = self.wiki.getRT_wsgienv()  # gets a copy

        # want wrapped app to see /api as root, so shift
        shift_path_info(env)

        env['wsgifront.x-wikklytext-wiki'] = self.wiki
        env['wsgifront.x-wikklytext-UID'] = cherrypy.session.get('UID', None)

        return wrapped.run(env)

    # serve all files under known prefixes
    if len(args) > 0 and len(kwargs) == 0 and self.can_serve_path(args):
        name = os.path.join(self.wiki.get_path(), *args)
        subpath = os.path.join(*args)

        # watch for suspicious-looking file requests (possibly trying to request files
        # outside of allowed tree). Catch names containing '..', '~', or paths that
        # don't begin with my wiki directory.
        if '..' in name or '~' in name or name[:len(self.wiki.get_path())] != self.wiki.get_path():
            # don't show full name to user - potential info leak
            msg = "Ignoring suspicious file request: %s" % subpath
            metadata.log_errormsg(self.metadb, msg, cherrypy.session.get('UID', None), args, kwargs)
            raise HTTPError(status=400, message=msg)

        if os.path.isfile(name):
            metadata.log_staticfile(self.metadb, name, cherrypy.session.get('UID', None), args, kwargs)
            return serve_file(name)
        else:
            # don't show full name to user - potential info leak
            msg = "Request for unknown file: %s" % subpath
            metadata.log_errormsg(self.metadb, msg, cherrypy.session.get('UID', None), args, kwargs)
            raise HTTPError(status=404, message=msg)

    # check for commands that do not require the user to be logged in
    if len(args) == 1 and len(kwargs) > 0 and \
            args[0] == 'DoServerCmd.html' and \
            kwargs.get('cmd', None) in ['doLogin', 'saveUser', 'search', 'debug']:
        return self.handle_cmd(args, kwargs)

    # process commands sent via 'DoServerCmd.html?cmd=NAME'
    if len(args) == 1 and len(kwargs) > 0:
        if args[0] == 'DoServerCmd.html':
            cmd = kwargs.get('cmd', None)
            if cmd is None:
                # don't bother logging
                return layoutErrorPage(self.wiki, 'No command given to DoServerCmd!', self.loginUID())

            # all other commands require user to be logged in
            # (also, if root user does not exist, force user to create it)
            if not self.loggedIn() or '0' not in self.wiki.user_all_UIDs():
                # remember original request so I can redirect after login
                # (exclude some commands where this would give weird behavior)
                if cmd not in ['doLogin', 'saveUser', 'completeEdit', 'deleteItem']:
                    self.set_stored_URL(args, kwargs)

                return self.beginLogin()

            return self.handle_cmd(args, kwargs)

    # now for more general content requests (ignore any kwargs -- may have been
    # redirected here via something like /PageName.html?weird&query&here, i.e. Disqus
    # does this when replying to comments)
    if len(args) == 1:
        base, ext = os.path.splitext(args[0])
        if ext.lower() == '.html':
            # store URL if needed later
            self.set_stored_URL(args, kwargs)

            # if root user does not yet exist, it must be created
            # before doing anything else (put this here so that requests
            # for i.e. CSS items still succeed)
            if '0' not in self.wiki.user_all_UIDs():
                return self.beginLogin()

            # make sure a valid item is being requested, not just
            # a spare .html file that's sitting there
            iname = self.wiki.itemname_from_fsname(base)
            if iname is None:
                return self.errormsg('No such item "%s"' % base, args, kwargs)

            # sanity - regenerate to make sure caller isn't trying to
            # force me to use a bad name
            full = self.fpath(makeFSname(iname) + '.html')

            # render as needed
            rendered = self.wiki.render(namelist=[iname], uid=self.loginUID(),
                                        logfunc=cherrypy.log.error)

            # sanity
            if len(rendered) != 1 or not rendered.has_key(iname):
                return self.errormsg('Error: Trying to render "%s", got "%s"' %
                                     (iname, str(rendered.keys())), args, kwargs)

            # turn off caching of response
            cherrypy.response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate'

            cherrypy.log.error("Serving %s" % rendered[iname][0])
            metadata.log_pageview(self.metadb, args[0], cherrypy.session.get('UID', None), args, kwargs)
            cherrypy.response.headers['Content-Type'] = content_type_for_xhtml(cherrypy.request.headers)
            return rendered[iname][1]

        elif ext.lower() in ['.jpg', '.png', '.gif', '.jpeg']:
            # assume other images are OK to serve
            full = self.fpath(makeFSname(args[0]))
            if os.path.isfile(full):
                metadata.log_staticfile(self.metadb, full, cherrypy.session.get('UID', None), args, kwargs)
                return serve_file(full)
            else:
                return ''

        elif len(ext) == 0:
            # might be a content item missing the '.html' -- try it
            iname = self.wiki.itemname_from_fsname(base)
            if iname is not None:
                # yep, valid name missing .html - redirect
                # (I prefer this to rendering directly under the basename because
                # people will be less likely to bookmark the non-.html file. So bookmarks
                # won't break if you switch to static rendering with 'wik render'.)
                raise HTTPRedirect(base + '.html')

    msg = "ERROR - Cannot handle request\n\nargs=%s\n\nkws=%s\n\nheaders\n%s\n\nrequest-line\n%s" % \
            (args, kwargs, str(cherrypy.request.headers), str(cherrypy.request.request_line))
    return self.errormsg(msg, args, kwargs)
def create_rss_xml(wiki, url_site):
    """
    Create RSS feed for the wiki.

    wiki: The WikklyWiki
    url_site: Site URL

    Returns UTF-8 encoded XML feed.
    """
    # follows format at:
    #    http://cyber.law.harvard.edu/rss/rss.html
    import wikklytext.version as version

    # load all items and sort by date (newest first)
    items = wiki.getall()
    items.sort(lambda a, b: cmp(b.mtime, a.mtime))

    # remove items with '-rss' tag
    items = [item for item in items if not item.has_tag('-rss')]

    # remove items in skiplist
    skiplist = wiki.get_skiplist()
    items = [item for item in items if item.name not in skiplist]

    # create header
    # general note: I use xml_escape() more than strictly necessary, for sanity
    ind = u' '
    rss = u'<?xml version="1.0" encoding="utf-8"?>\n'
    rss += u'<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">\n'
    rss += ind + u'<channel>\n'

    from wikklytext.wiki.core import SiteTitleText, SiteSubtitleText
    rss += ind*2 + u'<title>%s</title>\n' % xml_escape(SiteTitleText(wiki))
    rss += ind*2 + u'<link>%s</link>\n' % xml_escape(url_site)
    rss += ind*2 + u'<description>%s</description>\n' % xml_escape(SiteSubtitleText(wiki))
    if len(items):
        rss += ind*2 + u'<lastBuildDate>%s</lastBuildDate>\n' % xml_escape(items[0].mtime.to_rfc822())

    rss += ind*2 + u'<generator>WikklyText %s</generator>\n' % xml_escape(version.VSTR)
    rss += ind*2 + u'<docs>http://blogs.law.harvard.edu/tech/rss</docs>\n'
    # per recommendation at feedvalidator.org
    rss += ind*2 + u'<atom:link href="%s/rss.xml" rel="self" type="application/rss+xml" />' % xml_escape(url_site)

    # add items
    if len(url_site) and url_site[-1] != '/':
        url_site += '/'

    for item in items:
        rss += ind*2 + u'<item>\n'
        rss += ind*3 + u'<title>%s</title>\n' % xml_escape(item.name)
        rss += ind*3 + u'<link>%s</link>\n' % xml_escape(url_site + makeFSname(item.name) + '.html')

        # use item digest as guid for item as well
        # (add 'safe_mode' setting to digest so caching will depend on it as well)
        digest = item.digest(str(wiki.user_get_safemode(item.author)))

        # render item to HTML.
        # render with UID of *author*, not logged-in user
        do_cache = wiki.cacheable(item)
        inner = render_inner_html(wiki, item.name, item.author)

        # going to place in a CDATA section, so escape anything that looks like ']]>'
        inner = unicode(inner, 'utf-8')
        inner = inner.replace(']]>', ']]∋>')

        # place HTML in CDATA so no other escaping is needed
        rss += ind*3 + u'<description><![CDATA[%s]]></description>\n' % inner

        # make sure client knows that my guid is not a link
        rss += ind*3 + u'<guid isPermaLink="false">%s</guid>\n' % xml_escape(digest)
        rss += ind*3 + u'<pubDate>%s</pubDate>\n' % xml_escape(item.mtime.to_rfc822())
        rss += ind*2 + u'</item>\n'

    rss += ind + u'</channel>\n'
    rss += u'</rss>\n'

    return rss.encode('utf-8')
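# Usage sketch (not part of the original source): writing the feed to a file for
# static publishing instead of serving it over HTTP. 'http://example.com/wiki' is
# a placeholder site URL and 'rsspath' a hypothetical output path; 'wiki' is
# assumed to be an open WikklyWiki.
def write_rss_file(wiki, rsspath):
    xml = create_rss_xml(wiki, 'http://example.com/wiki')  # UTF-8 encoded bytes
    f = open(rsspath, 'wb')
    try:
        f.write(xml)
    finally:
        f.close()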
def _resolver(self, url_fragment, base_url, site_url):
    from urlparse import urlsplit, urlunsplit

    # I ignore the parsed scheme & netloc and use base_url instead.
    # This avoids confusion with names like "Name: Here"
    # (setting to 'http' does not work -- causes URLs like "http:///.."
    # which don't work ...)
    scheme, netloc, path, query, frag = urlsplit(url_fragment)

    if path[-7:] == 'rss.xml':
        # report as internal URL
        return (base_url + 'rss.xml', 'internal')

    # refs to 'files/*' are internal
    if path[:6] == 'files/':
        return (base_url + path, 'internal')

    # redirect DefaultTiddlers -> index
    if path == 'DefaultTiddlers':
        path = 'index'

    # see if it is a special name that won't exist in the wiki itself (either
    # an index-* file, or a command like DoServerCmd?cmd=...)
    if path in ['index', 'index-Names', 'index-Tags', 'index-Timeline', 'DoServerCmd']:
        # report it as an internal URL
        url = self.makeurl(base_url, path, query, frag)
        return (url, 'internal')

    # if path is a tiddler name, return link to rendered file
    item = self.wiki.store().getitem(path)
    if item is None:
        # if that failed, try loading with entire URL (catches things like "Name??")
        item = self.wiki.store().getitem(url_fragment)

    if item:
        name = makeFSname(item.name)
        # only DoServerCmd is allowed to have a query, so leave query
        # empty here (this allows pages like "Name??" to be linked to).
        url = self.makeurl(base_url, name, '', frag)
        # report it as an internal URL (in a sense, all tiddlers are anchors,
        # so this is logical to do here)
        return (url, 'internal')

    # if it looks like a regular URL, let the default handler take it (XXX this is
    # copied from the lexer - maybe need to refactor later)
    if re.match(r"((http|https|file|ftp|gopher|mms|news|nntp|telnet)://[a-zA-Z0-9~\$\-_\.\#\+\!%/\?\=&]+(:?\:[0-9]+)?(?:[a-zA-Z0-9~\$\-_\.\#\+\!%/\?\=&]+)?)|(mailto:[a-zA-Z\._@]+)",
                url_fragment):
        return (None, None)

    # if it's an anchor, do nothing
    if len(frag):
        return (None, None)

    # if it's a local pathname, make a link
    if os.path.exists(url_fragment):
        return ('file:///%s' % url_fragment, 'external')

    # else, make it an auto-item if auto wiki words are enabled
    if self.wiki.get_link_unknown_camelwords():
        p = {'cmd': 'newItem', 'name': url_fragment}
        url = self.makeurl(base_url, 'DoServerCmd', urlencode(p), '')
        return (url, 'newitem')
    else:
        # do default URL resolution
        return (None, None)