def run(self, serial, tag, tagname, pagename, soup, request, response):
    try:
        # Include an attachment referenced by the tag's src attribute
        source = tag['src']
        (schema, host, path, parameters, query, fragment) = urlparse.urlparse(source)
        attachment = wc.get_attachment(pagename, path)
        if attachment:
            buffer = attachment.data
        else:
            tag.replace_with(_('error_include_file'))
            return False
    except KeyError:
        # No src attribute: highlight the tag's inline text content instead
        buffer = u''.join(tag.find_all(text=re.compile('.+'))).strip()
    try:
        lexer = tag['syntax'].lower()
    except KeyError:
        lexer = 'text'
    # TODO: find a way to do inline styles with the correct color scheme when formatting for RSS
    if request is False:  # we're formatting for RSS
        lexer = 'text'
    lexer = get_lexer_by_name(lexer)
    formatter = HtmlFormatter(linenos=False, cssclass='syntax')
    result = highlight(buffer, lexer, formatter)
    tag.replace_with(BeautifulSoup(result.strip()))
    return False  # no other plugin should process this tag
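# A minimal, standalone sketch of the Pygments calls the plugin above relies on
# (highlight, get_lexer_by_name, HtmlFormatter). The sample snippet is made up;
# only the 'syntax' CSS class is taken from the plugin itself.
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter

sample = 'print("hello, wiki")'
html = highlight(sample, get_lexer_by_name('python'),
                 HtmlFormatter(linenos=False, cssclass='syntax'))
# html is a <div class="syntax"> block ready to be spliced back into the page soup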
def media_asset(item):
    """Return page attachments"""
    a = wc.get_attachment(os.path.dirname(item), os.path.basename(item))
    if not a:
        abort(404, "File not found")
    response.content_type = a.mime_type
    return a.data
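# How media_asset might be wired up, assuming a Bottle-style application
# (abort/response/redirect usage above is consistent with Bottle). The route
# path and app object are assumptions for illustration, not taken from this code.
from bottle import Bottle

app = Bottle()

@app.route('/media/<item:path>')
def serve_media(item):
    # delegate to the media_asset() handler defined above
    return media_asset(item)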
def wiki(page):
    """Render a wiki page"""
    if ids.is_suspicious(request):
        abort(403, "Temporarily blocked due to suspicious activity")
    try:
        p = wc.get_page(page)
        return {'headers': p.headers, 'data': p.body, 'content-type': p.mime_type}
    except Exception as e:
        # fallback to index/aliases/levenshtein
        log.debug("Attempting to resolve aliases for %s" % page)
        original = wc.resolve_alias(page)
        if original and original != page:
            redirect("%s/%s" % (settings.wiki.base, original))
        log.debug("Attempting to find close matches for %s" % page)
        close = wc.get_close_matches_for_page(page)
        if len(close):
            redirect("%s/%s" % (settings.wiki.base, close[0]))
        ids.flag(request)
        abort(404, "Page not found")
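# A rough sketch of what a helper like wc.get_close_matches_for_page could do.
# The real implementation is not shown here, so this is an assumption for
# illustration; difflib ranks by sequence similarity rather than true
# Levenshtein distance.
import difflib

def get_close_matches_for_page(page, all_page_names, n=3, cutoff=0.6):
    # rank existing page names by similarity to the requested one
    return difflib.get_close_matches(page, all_page_names, n=n, cutoff=cutoff)

# e.g. get_close_matches_for_page('HomePage', wc.get_page_mtimes().keys())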
def __init__(self):
    self.acronyms = wc.get_acronym_map()
    log.debug(self)
def run(self, serial, tag, tagname, pagename, soup, request, response):
    try:
        uri = tag['href']
    except KeyError:
        return True
    # Try to handle relative URIs
    if uri[0] == '.':
        uri = posixpath.normpath(os.path.join(pagename, uri))
    # Try to handle the uri as a schema/path pair
    schema = ''
    path = uri
    try:
        schema, path = uri.split(':', 1)
    except:
        pass
    known = False
    if schema == '':
        alias = wc.resolve_alias(path)
        if alias and alias != path:
            path = tag['href'] = uri = alias
        if path in wc.get_page_mtimes().keys():
            known = True
    if schema == '':
        if wc.get_attachment(pagename, path):
            tag['href'] = unicode(settings.wiki.media + "/" + pagename + "/" + path)
            tag['title'] = self.schemas['attach']['title'] % {'uri': os.path.basename(path)}
            tag['class'] = self.schemas['attach']['class']
            return False
        if known:
            # this is a known Wiki link, so there is no need to run it through more plugins
            if request is False:
                # check for a direct outbound link
                # TODO: check x-link handling
                if path in wc.link_overrides:
                    uri = wc.link_overrides[path]
                    (schema, netloc, path, parameters, query, fragment) = urlparse.urlparse(uri)
                    tag['href'] = uri
                    tag['title'] = self.schemas[schema]['title'] % {'uri': uri}
                    tag['class'] = self.schemas[schema]['class']
                    return False
            tag['href'] = settings.wiki.base + '/' + uri
            tag['class'] = "wiki"
            try:
                # to use indexed metadata to annotate links
                last = i.page_info[path]['last-modified']
                tag['title'] = _('link_update_format') % (path, time_since(last))
            except:
                tag['title'] = _('link_defined_notindexed_format') % path
        elif '#' == uri[0]:
            # this is an in-page anchor
            if request != False:
                tag['href'] = request.path + uri
            tag['class'] = "anchor"
        else:
            if request is False:
                # remove unknown wiki links for RSS feeds
                tag.replace_with(tag.contents[0])
            # format for online viewing
            try:
                exists = tag['class']
                return True  # we're done here, but this tag may need handling elsewhere
            except:
                tag['href'] = settings.wiki.base + '/' + uri
                tag['class'] = "wikiunknown"
                tag['title'] = _('link_undefined_format') % path
    elif schema in self.schemas.keys():
        # this is an external link, so reformat it
        tag['title'] = self.schemas[schema]['title'] % {'uri': uri}
        tag['class'] = self.schemas[schema]['class']
        #tag['target'] = '_blank'
    else:
        # assume this is an interwiki link (i.e., it seems to have a custom schema)
        tag['title'] = _('link_interwiki_format') % uri
        tag['class'] = "interwiki"
        tag['target'] = '_blank'
        # Signal that this tag needs further processing
        return True
    # We're done
    return False
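# Quick illustration of the relative-URI normalisation used above; the page and
# link values are made up for the example.
import os
import posixpath

pagename = 'docs/setup'          # hypothetical current page
uri = '../downloads/installer'   # hypothetical relative href
print(posixpath.normpath(os.path.join(pagename, uri)))  # 'docs/downloads/installer' on POSIX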
def preload_pages(pages, prefix):
    for p in pages:
        log.warn(p)
        wc.get_page(p["page_name"])
    memcache.delete(prefix, namespace=NS_BATCH)
def __init__(self):
    self.schemas = wc.get_interwiki_map()
    log.debug(self)
def __init__(self):
    self.aliases = wc.get_wiki_aliases()
    log.debug(self)