def map(request, file):
    start_map(request, file)
    file.write('<current>\n')
    for page in wikiutil.getPageList(request, objects=True):
        if (not command_line and
            not request.user.may.read(page)):    
            continue
        request.cursor.execute(
            """SELECT x, y, created_time, created_by, created_by_ip,
                      pagename_propercased, address
               FROM mapPoints
               WHERE wiki_id=%(wiki_id)s AND
                     pagename=%(pagename)s""",
            {'wiki_id': page.wiki_id, 'pagename': page.page_name})
        for mapitem in request.cursor.fetchall():
            d = {'pagename': page.page_name, 'x': mapitem[0],
                 'y': mapitem[1], 'created_time': mapitem[2],
                 'created_by': get_username(mapitem[3], request),
                 'created_by_ip': mapitem[4],
                 'pagename_propercased': mapitem[5],
                 'address': mapitem[6]}
            file.write('<point %s />\n' % generate_attributes(d))
    file.write('</current>\n')
    file.write('<old>\n')
    for page in wikiutil.getPageList(request, objects=True):
        if (not command_line and
            not request.user.may.read(page)):    
            continue

        request.cursor.execute(
            """SELECT x, y, created_time, created_by, created_by_ip,
                      pagename_propercased, address,
                      deleted_time, deleted_by, deleted_by_ip
               FROM oldMapPoints
               WHERE wiki_id=%(wiki_id)s AND
                     pagename=%(pagename)s""",
            {'wiki_id': page.wiki_id, 'pagename': page.page_name})
        for mapitem in request.cursor.fetchall():
            d = {'pagename': page.page_name, 'x': mapitem[0],
                 'y': mapitem[1], 'created_time': mapitem[2],
                 'created_by': get_username(mapitem[3], request),
                 'created_by_ip': mapitem[4],
                 'pagename_propercased': mapitem[5],
                 'address': mapitem[6], 'deleted_time': mapitem[7],
                 # resolve the deleted_by user id to a username (as with
                 # created_by above); the IP column passes through unchanged
                 'deleted_by': get_username(mapitem[8], request),
                 'deleted_by_ip': mapitem[9]}
            file.write('<point %s />\n' % generate_attributes(d))
    file.write('</old>\n')

    end_map(request, file)
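
For reference, a sketch of the XML this export produces, assuming
generate_attributes() renders the dict as key="value" pairs (the values
below are made up):

<current>
<point pagename="downtown" x="38.5" y="-121.7" created_by="mattbrown" ... />
</current>
<old>
<point pagename="old cafe" x="38.6" y="-121.8" deleted_by="janedoe" ... />
</old>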
Example #2
def build_search_index():
    """
    Builds the title and full text search indexes.
    """
    if not config.has_xapian:
        print ("You don't have Xapian installed..."
               "skipping configuration of search index.")
        return

    if not os.path.exists(config.search_db_location):
        # create it if it doesn't exist, we don't want to create
        # intermediates though
        os.mkdir(config.search_db_location)

    # prune existing db directories, do this explicitly in case third party
    # extensions use this directory (they really shouldn't)
    for db in ('title', 'text'):
        dbpath = os.path.join(config.search_db_location, db)
        if os.path.exists(dbpath):
            shutil.rmtree(dbpath)

    print "Building search index..."
    from Sycamore import wikiutil, search
    pages = wikiutil.getPageList(req, objects=True)
    for page in pages:
        print "  %s added to search index." % page.page_name
        # don't use remote server on initial build
        search.add_to_index(page, try_remote=False) 
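
build_search_index() reads a module-level 'req' rather than taking a
request argument; a minimal invocation sketch, assuming the surrounding
script sets up 'req' the same way Example #4 does:

req = request.RequestDummy()
build_search_index()
req.db_disconnect()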
Example #3
def _get_user_pages(request):
    from Sycamore import user
    pages = []
    for page in wikiutil.getPageList(request, objects=True):
        if user.User(request, name=page.page_name).valid:
            # it's a user page 
            pages.append(page)
    return pages 
Example #4
def rebuild_all_caches():
    req = request.RequestDummy()
    wiki_list = wikiutil.getWikiList(req)
    for wiki_name in wiki_list:
        req.switch_wiki(wiki_name)
        plist = wikiutil.getPageList(req)
        maintenance.buildCaches(wiki_name, plist, doprint=True)
    req.db_disconnect()
Example #5
    def getPageList(self, alphabetize=True, lowercase=False, objects=False):
        """
        A cached version of wikiutil.getPageList().
        Also, this list is always sorted.
        """
        if self._all_pages is None:
            self._all_pages = wikiutil.getPageList(self,
                alphabetize=alphabetize, lowercase=lowercase, objects=objects)
        return self._all_pages
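
A short usage sketch (hypothetical; assumes the method lives on the
request object). Note that once _all_pages is populated, the keyword
arguments of later calls are ignored:

pages = req.getPageList(objects=True)   # first call queries wikiutil
pages_again = req.getPageList()         # served from the cached list,
                                        # ignoring any new arguments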
Example #6
def make_impossible_pages(request, numpages, max_length):
    """
    Make a list of numpages pagenames that can't exist
    (aren't in the list of all pages).
    """
    import random
    all_pages = [pagename.lower() for pagename in
        wikiutil.getPageList(request)]
    impossible = []
    for i in xrange(0, numpages):
        random_string = None
        random_string_lower = None
        # retry until the candidate collides with no existing page; the
        # loop must re-run on a collision rather than stop after one draw
        while random_string is None or random_string_lower in all_pages:
            random_string = make_random_string(max_length)
            random_string_lower = random_string.lower()
        impossible.append(random_string)
    return impossible
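
A quick sketch of how this helper might be used as a negative test
fixture (hypothetical):

fake_names = make_impossible_pages(request, 5, 20)
# none of these names matches an existing page, even case-insensitively,
# so page lookups for them are guaranteed to miss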
Example #7
    def _macro_titleindex(self, args, formatter=None):
        if not formatter:
            formatter = self.formatter
        _ = self._
        html = []
        index_letters = []
        allpages = int(self.form.get('allpages', [0])[0]) != 0
        pages = wikiutil.getPageList(self.request, alphabetize=False)
        pages_deco = [ (pagename.lower(), pagename) for pagename in pages ]
        pages_deco.sort()
        pages = [ word for lower_word, word in pages_deco ]
        current_letter = None
        for name in pages:
            if 1: #self.request.user.may.read(name):
                letter = name[0].upper()
                # XXX UNICODE - fix here, too?
                if wikiutil.isUnicodeName(letter):
                    try:
                        letter = wikiutil.getUnicodeIndexGroup(
                            unicode(name, config.charset))
                        if letter:
                            letter = letter.encode(config.charset)
                    except UnicodeError:
                        letter = None
                    if not letter: letter = "~"
                if letter not in index_letters:
                    index_letters.append(letter)
                if letter != current_letter:
                    html.append('<a name="%s"><h3>%s</h3></a>' % (
                        wikiutil.quoteWikiname(letter),
                        letter.replace('~', 'Others')))
                    current_letter = letter
                else:
                    html.append('<br>')
                html.append('<a href="%s%s">%s</a>\n' %
                    (self.request.getScriptname(),
                     wikiutil.quoteWikiname(name), name))

        index = _make_index_key(index_letters)
        return '%s%s' % (index, ''.join(html))
Example #8
common_css = common_file.read()
common_file.close()
print_file = open(os.path.join(css_location, 'print.css'))
print_css = print_file.read()
print_file.close()

# image files are binary, so open in 'rb' mode and read them whole
logo_background_file = open(os.path.join(img_location, 'floater.png'), 'rb')
logo_background = logo_background_file.read()
logo_background_file.close()
if config.image_logo:
  logo_file = open(os.path.join(config.web_root, config.url_prefix[1:],
                                config.image_logo), 'rb')
  logo = logo_file.read()
  logo_file.close()


flat_page_dict = {
    'Wiki Settings': buildDB.basic_pages['Wiki Settings'],
    'Wiki Settings/CSS': buildDB.basic_pages['Wiki Settings/CSS'],
    'Wiki Settings/Images': buildDB.basic_pages['Wiki Settings/Images'],
}

flat_page_dict['Wiki Settings/CSS'].files = [("screen.css", screen_css),
                                             ("common.css", common_css),
                                             ("print.css", print_css)]
images_list = [("logo_background.png", logo_background)]
if config.image_logo:
  images_list.append(("logo.png", logo))
flat_page_dict['Wiki Settings/Images'].files = images_list

buildDB.insert_pages(cursor, flat_page_dict)

plist = wikiutil.getPageList(req)
maintenance.buildCaches(plist)

req.db_disconnect()
Example #9
def clear_page_caches(request):
    plist = wikiutil.getPageList(request)
    maintenance.clearCaches(request.config.wiki_name, plist, doprint=False,
                            req=request)
Example #10
def build_search_index(request):
    pages = wikiutil.getPageList(request, objects=True)
    for page in pages:
        search.add_to_index(page)
Example #11
    def scandicts(self, force_update=False, update_pagename=None):
        """
        scan all pages matching the dict / group regex and init the dictdict
        """
        global DICTS_PICKLE_VERSION
        dump = 0
        picklefile = config.data_dir + '/dicts.pickle'
        if config.memcache:
            DICTS_DATA = self.request.mc.get("dicts_data")
        else:
            DICTS_DATA = {}

        if DICTS_DATA and not force_update:
            self.__dict__.update(DICTS_DATA)
        else:
            DICTS_DATA = {}
            try:
                data = pickle.load(open(picklefile))
                self.__dict__.update(data)
                if self.picklever != DICTS_PICKLE_VERSION:
                    self.reset()
                    dump = 1
                if config.memcache:
                    self.request.mc.add('dicts_data', data)
            except:
                # missing or unreadable pickle: rebuild from scratch
                self.reset()

        # init the dicts the first time
        if not self.namespace_timestamp or force_update:
            now = time.time()
            # compile the group regex up front so it is available to the
            # expandgroups pass below regardless of which branch runs
            import re
            group_re = re.compile(config.page_group_regex, re.IGNORECASE)
            if force_update and update_pagename:
                self.addgroup(update_pagename)
            else:
                pagelist = wikiutil.getPageList(self.request)
                grouppages = filter(group_re.search, pagelist)
                for pagename in grouppages:
                    if pagename not in self.dictdict:
                        self.addgroup(pagename)
            self.namespace_timestamp = now
            dump = 1

        data = {
            "namespace_timestamp": self.namespace_timestamp,
            "pageupdate_timestamp": self.pageupdate_timestamp,
            "dictdict": self.dictdict,
            "picklever": self.picklever,
        }
        if dump:
            for pagename in self.dictdict:
                if update_pagename or group_re.search(pagename):
                    group = self.dictdict[pagename]
                    group.expandgroups(self)

            if config.memcache: self.request.mc.set('dicts_data', data)
            pickle.dump(data, open(picklefile, 'w'), True)
            try:
                os.chmod(picklefile, 0666 & config.umask)
            except OSError:
                pass
Example #12
    def process(self):
        # processes the search
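        # relevancy = (regex match count / word count) * 100, computed
        # separately for page titles and page text, then rescaled below
        # so the best hit in each list reads as 100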
        wiki_name = self.request.config.wiki_name
        if not self.wiki_global:
            wikis = [wiki_name]
        else:
            wikis = wikiutil.getWikiList(self.request)

        for wiki_name in wikis: 
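            # the page list comes from the request's current wiki; the
            # per-iteration wiki_name is only applied when constructing
            # Page objects below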
            pagelist = wikiutil.getPageList(self.request)
            matches = []
            for pagename in pagelist:
                page = Page(pagename, self.request, wiki_name=wiki_name)
                text = page.get_raw_body()
                text_matches = find_num_matches(self.regexp, text)
                if text_matches:
                    percentage = (text_matches*1.0/len(text.split()))*100
                    self.text_results.append(searchResult(page.page_name, text,
                                                          percentage,
                                                          page.page_name,
                                                          wiki_name))

                title = page.page_name
                title_matches = find_num_matches(self.regexp, title)
                if title_matches:
                    percentage = (title_matches*1.0/len(title.split()))*100
                    self.title_results.append(searchResult(title, title,
                                                           percentage,
                                                           page.page_name,
                                                           wiki_name))
            # sort the title and text results by relevancy
            self.title_results.sort(lambda x,y: cmp(y.percentage,
                                                    x.percentage))
            self.text_results.sort(lambda x,y: cmp(y.percentage,
                                                   x.percentage))

            # normalize the percentages.
            # still gives shit, but what can you expect from regexp?
            # install xapian!
            if self.title_results:
                i = 0
                max_title_percentage = self.title_results[0].percentage
                self.title_results = self.title_results[
                    self.t_start_loc:self.t_start_loc+self.num_results+1]
                for title in self.title_results:
                    if i > self.num_results:
                        break
                    title.percentage = title.percentage/max_title_percentage
                    title.percentage = title.percentage*100
                    i += 1

            if self.text_results: 
                i = 0 
                max_text_percentage = self.text_results[0].percentage
                self.text_results = self.text_results[
                    self.p_start_loc:self.p_start_loc+self.num_results+1]
                for text in self.text_results:
                    if i > self.num_results:
                        break
                    text.percentage = text.percentage/max_text_percentage
                    text.percentage = text.percentage*100
                    i += 1
Example #13
            _('Invalid include arguments "%s"!')) % (args,)

    # get the pages
    inc_pattern = args.group('pattern')
    if args.group('level'):
        level = int(args.group('level'))
    else:
        level = 1

    try:
        needle_re = re.compile(inc_pattern, re.IGNORECASE)
    except re.error, e:
        # surface both the offending pattern and the regex error message
        return ('<p><strong class="error">%s</strong></p>' %
                (_("ERROR in regex '%s': %s") % (inc_pattern, e)))

    all_pages = wikiutil.getPageList(macro.request)
    hits = filter(needle_re.search, all_pages)
    hits.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        hits.reverse()
    max_items = args.group('items')
    if max_items:
        hits = hits[:int(max_items)]

    for inc_name in hits:
        params = '%s,"%s"' % (inc_name, inc_name)
        ret = ret + "<p>" + Sycamore.macro.include.execute(
            macro, params, formatter=formatter) + "\n"

    # return include text
    return ret
Example #14
def get_group_members(groupname, request):
  members = {}
  page_text = Page(groupname, request).get_raw_body(fresh=True)
  for line in page_text.split('\n'):
    if line.startswith(' *'):
      username = line[len(' *'):].strip()
      members[username.lower()] = None
  return members
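
# For reference (hypothetical example): given a group page whose raw body
# contains bullet lines beginning with ' *', e.g.
#    * MattBrown
#    * JaneDoe
# get_group_members() returns {'mattbrown': None, 'janedoe': None}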
    
if wiki_name:
  req = request.RequestDummy(wiki_name=wiki_name)
else: 
  req = request.RequestDummy()

print "Removing/converting old ACL system to new system..."
plist = wikiutil.getPageList(req, objects=True)
for page in plist:
  print "  ", page.page_name
  page_text = page.get_raw_body(fresh=True)
  lines = page_text.split('\n')
  for line in lines:
    if line.startswith('#acl '):
      groupdict = parseACL(line[len('#acl '):])
      wikiacl.setACL(page.page_name, groupdict, req)
      new_page_text = remove_acl(page_text)
      req.cursor.execute("UPDATE curPages set text=%(page_text)s where name=%(page_name)s and wiki_id=%(wiki_id)s", {'page_text':new_page_text, 'page_name':page.page_name, 'wiki_id':req.config.wiki_id}, isWrite=True)
      req.cursor.execute("UPDATE allPages set text=%(page_text)s where name=%(page_name)s and wiki_id=%(wiki_id)s and editTime=%(mtime)s", {'page_text':new_page_text, 'page_name':page.page_name, 'wiki_id':req.config.wiki_id, 'mtime':page.mtime()}, isWrite=True)
      print "    ", "...converted!"
      break

print "Adding new user groups.."