Example 1
def wiki(page):
    s = Store(path_for(settings.content.path))
    try:
        result = s.get_page(page.lower())
    except Exception:
        # fall back to the empty placeholder page if the lookup fails
        result = s.get_page('meta/EmptyPage')

    return result
Example 2
def wiki(page):
    """Render a wiki page"""

    s = Store(path_for(settings.content.path))
    try:
        result = s.get_page(page.lower())
    except Exception as e:
        log.warn("%s rendering page %s" % (e, page))
        result = s.get_page('meta/EmptyPage')

    return result
Example 3
    def load(self):
        # load Aliases
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except Exception:
            log.warn("Aliases: no %s definitions" % self.meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'], page['content-type']))
        h = HTMLParser.HTMLParser()

        all_sections = u''.join(
            map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, build the alias dictionary
        for line in all_sections.split('\n'):
            try:
                (link, replacement) = line.strip().split(' ', 1)
                self.aliases[link] = replacement
                self.aliases[link.replace('_', ' ')] = replacement
            except ValueError:
                log.warn("skipping line '%s'" % line)
                pass
        self.mtime = time.time()
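The alias, acronym and InterWiki loaders in these examples all share one convention: every <pre> block of the metadata page holds one mapping per line, with the key separated from its value by the first space. The following is a minimal, self-contained sketch of that line-splitting step; pre_text is a hypothetical alias block, not one taken from the project.

# Standalone sketch of the "one mapping per line" parsing used in load().
# pre_text stands in for the text extracted from a page's <pre> blocks.
pre_text = u"""
Front_Page HomePage
Sandbox meta/Sandbox
broken-line-without-a-replacement
"""

aliases = {}
for line in pre_text.strip().split('\n'):
    try:
        # split on the first space only, so the replacement may contain spaces
        (link, replacement) = line.strip().split(' ', 1)
        aliases[link] = replacement
        aliases[link.replace('_', ' ')] = replacement  # also register the spaced form
    except ValueError:
        # blank or malformed lines are simply skipped, as in the loaders above
        pass

print(aliases)  # mappings for 'Front_Page', 'Front Page' and 'Sandbox'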
Example 4
    def load(self):
        # Load acronym map
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except Exception:
            log.warn("no %s definitions" % self.meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'],page['content-type']))

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, build the acronym dictionary

        for line in all_sections.split('\n'):
            try:
                (acronym, expansion) = line.split(' ',1)
                self.acronyms[acronym.lower()] = expansion
            except ValueError: # skip blank lines or with more than two fields
                log.debug("skipping line '%s'" % line)
                pass
Example 5
    def load(self):
        # load InterWikiMap
        s = Store()
        try:
            page = s.get_page(self.wiki_map)
        except Exception:
            log.warn("InterWikiMap: no %s definitions" % self.wiki_map)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'],page['content-type']))
        h = HTMLParser.HTMLParser()

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, let's build the schema hash
        for line in all_sections.split('\n'):
            try:
                (schema, url) = line.strip().split(' ',1)
                self.schemas[schema.lower()] = h.unescape(url)  # undo HTML escaping, e.g. "&amp;" -> "&"
            except ValueError:
                log.warn("skipping line '%s'" % line)
                pass
        self.mtime = time.time()
Example 6
    def load(self):
        # load InterWikiMap
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except Exception:
            log.warn("InterWikiMap: no %s definitions" % self.meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'],page['content-type']))
        h = HTMLParser.HTMLParser()

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, let's build the schema hash
        for line in all_sections.split('\n'):
            try:
                (schema, url) = line.strip().split(' ',1)
                self.schemas[schema.lower()] = h.unescape(url)  # undo HTML escaping, e.g. "&amp;" -> "&"
            except ValueError:
                log.debug("skipping line '%s'" % line)
                pass
        self.mtime = time.time()
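Because the metadata page is run through render_markup() before parsing, URLs inside its <pre> blocks can carry HTML entities such as &amp;, which is what the h.unescape() call above reverses. Here is that schema-parsing and unescaping step in isolation; the sample line is hypothetical, and html.unescape serves as the Python 3 stand-in for HTMLParser().unescape.

# Sketch of the schema parsing plus entity unescaping used by the InterWikiMap loader.
try:
    from HTMLParser import HTMLParser      # Python 2, as in the examples above
    unescape = HTMLParser().unescape
except ImportError:
    from html import unescape              # Python 3 equivalent

schemas = {}
line = "wikipedia http://en.wikipedia.org/w/index.php?search=&amp;title=Special:Search"
(schema, url) = line.strip().split(' ', 1)
schemas[schema.lower()] = unescape(url)    # "&amp;" becomes "&"
print(schemas['wikipedia'])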
Example 7
    def load(self):
        # Load acronym map
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except Exception:
            log.warn("no %s definitions" % self.meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'], page['content-type']))

        all_sections = u''.join(
            map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, build the acronym dictionary

        for line in all_sections.split('\n'):
            try:
                (acronym, expansion) = line.split(' ', 1)
                self.acronyms[acronym.lower()] = expansion
            except ValueError:  # skip blank lines or with more than two fields
                log.debug("skipping line '%s'" % line)
                pass
Example 8
    def load(self):
        # load Aliases
        s = Store()
        try:
            page = s.get_page(self.meta_page)
        except Exception:
            log.warn("Aliases: no %s definitions" % self.meta_page)
            return

        # prepare to parse only <pre> tags (so that we can have multiple maps organized by sections)
        soup = BeautifulSoup(render_markup(page['data'],page['content-type']))
        h = HTMLParser.HTMLParser()

        all_sections = u''.join(map(lambda t: str(t.string), soup.find_all('pre'))).strip()
        # now that we have the full map, build the alias dictionary
        for line in all_sections.split('\n'):
            try:
                (link, replacement) = line.strip().split(' ',1)
                self.aliases[link] = replacement
                self.aliases[link.replace('_',' ')] = replacement
            except ValueError:
                log.warn("skipping line '%s'" % line)
                pass
        self.mtime = time.time()
Example 9
class WikiController(object):

    def __init__(self, settings):
        """Initialize the controler and preload basic metadata"""

        self.redis = Redis(host=settings.redis.bind_address, port=settings.redis.port)
        self.store = Store(settings.content.path)
        self.get_all_pages()   # page modification times
        self.get_all_aliases() # page aliases


    def get_page(self, path):
        """Returns a single page"""

        if path in self.store.pages:
            return self.store.get_page(path)
        raise KeyError(path)


    def resolve_alias(self, path):
        """Attempts to resolve an alias to a page"""

        # Check locally first, to save overhead
        if path in self.store.aliases:
            return self.store.aliases[path]

        # Check if there's been an update in Redis
        alias = self.redis.hget(META_ALIASES, path)
        if alias:
            self.store.aliases[path] = alias
            return alias
        
        return None


    def get_all_pages(self):
        """Returns a hash of all known pages and mtimes"""

        if not len(self.store.pages):
            if self.redis.exists(META_PAGES):
                self.store.pages = self.redis.hgetall(META_PAGES)
            else:
                # force filesystem scan and alias generation
                pages = self.store.get_all_pages()
                log.debug(pages)
                self.redis.hmset(META_PAGES, pages)
        return self.store.pages


    def get_all_aliases(self):
        """Returns a hash of all known page aliases"""

        if not len(self.store.aliases):
            if self.redis.exists(META_ALIASES):
                self.store.aliases = self.redis.hgetall(META_ALIASES)
            else:
                # force filesystem scan and alias generation
                self.store.get_all_pages()
                self.redis.hmset(META_ALIASES, self.store.aliases)
        return self.store.aliases


    def get_close_matches_for_page(self, path):
        """Get a list of close matches for a given page/path"""

        pages = self.get_all_pages()
        return get_close_matches(path, pages.keys())
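The suggestion helper at the end appears to rely on get_close_matches from the standard library's difflib module; the import is not visible in this excerpt, so treat that as an assumption. A standalone illustration with made-up page names:

# Fuzzy matching of a misspelled path against known page names.
from difflib import get_close_matches

pages = {'HomePage': 1325376000, 'SandBox': 1325376000, 'meta/EmptyPage': 1325376000}
print(get_close_matches('HomePge', list(pages.keys())))   # ['HomePage']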