def getAttachUrl(pagename, filename, request, addts=0, escaped=0, do='get'):
    """ Get URL that points to attachment `filename` of page `pagename`.

        If 'addts' is true, a timestamp with the file's modification time
        is added, so that browsers reload a changed file.
    """
    if htdocs_access(request):
        # direct file access via webserver
        timestamp = ''
        if addts:
            try:
                timestamp = '?ts=%s' % os.path.getmtime(
                    getFilename(request, pagename, filename))
            except IOError:
                pass
        url = "%s/%s/attachments/%s%s" % (
            request.cfg.attachments['url'], wikiutil.quoteWikinameFS(pagename),
            wikiutil.url_quote(filename), timestamp)
    else:
        # send file via CGI
        if do not in ['get', 'view']:
            do = 'get'
        url = "%s/%s?action=%s&do=%s&target=%s" % (
            request.getScriptname(), wikiutil.quoteWikinameURL(pagename),
            action_name, do, wikiutil.url_quote_plus(filename))
    if escaped:
        url = wikiutil.escape(url)
    return url
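# Illustrative sketch (not part of AttachFile): roughly what the CGI branch above
# builds, with literal stand-ins for the request/config objects; 'AttachFile' as
# the value of the module-level action_name is an assumption.
import urllib

scriptname = '/mywiki'           # stand-in for request.getScriptname()
pagename_url = 'SomePage'        # stand-in for wikiutil.quoteWikinameURL(pagename)
target = urllib.quote_plus('photo of me.jpg')
url = "%s/%s?action=%s&do=%s&target=%s" % (scriptname, pagename_url,
                                           'AttachFile', 'get', target)
print url   # /mywiki/SomePage?action=AttachFile&do=get&target=photo+of+me.jpg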
def write(self, pages_dir):
    """ write a page, including revisions, log, attachments to disk """
    if ('PAGE', self.name) in self.renames:
        name_new = self.renames[('PAGE', self.name)]
        if name_new != self.name:
            print "Renaming page %r -> %r" % (self.name, name_new)
            self.name_old = self.name
            self.name = name_new
    qpagename = wikiutil.quoteWikinameFS(self.name)
    page_dir = opj(pages_dir, qpagename)
    os.makedirs(page_dir)
    # write current file
    current = self.current
    if current is not None:
        if create_rev and not self.is_deleted:
            current += 1
        current_fname = opj(page_dir, 'current')
        current_file = file(current_fname, "w")
        current_str = '%08d\n' % current
        current_file.write(current_str)
        current_file.close()
    # copy edit-log
    if self.editlog is not None:
        editlog_fname = opj(page_dir, 'edit-log')
        self.editlog.copy(editlog_fname, self.renames, deleted=self.is_deleted)
    # copy page revisions
    if self.revisions is not None:
        rev_dir = opj(page_dir, 'revisions')
        os.makedirs(rev_dir)
        for rev in self.revlist:
            if create_rev:
                self.revisions[rev].copy(rev_dir, self.renames)
            else:
                if int(rev) == self.current:
                    self.revisions[rev].copy(rev_dir, self.renames, convert=True)
                else:
                    self.revisions[rev].copy(rev_dir, self.renames)
        if create_rev and not self.is_deleted:
            self.revisions[rev].copy(rev_dir, self.renames, convert=True, new_rev=rev + 1)
    # copy attachments
    if self.attachments is not None:
        attach_dir = opj(page_dir, 'attachments')
        os.makedirs(attach_dir)
        for fn, att in self.attachments.items():
            # we have to check for renames here because we need the (old) pagename, too:
            if ('FILE', self.name_old, fn) in self.renames:
                fn_new = self.renames[('FILE', self.name_old, fn)]
                if fn_new != fn:
                    print "Renaming file %r %r -> %r" % (self.name_old, fn, fn_new)
                    att.name = fn_new
            att.copy(attach_dir)
def add(self, request, mtime, rev, action, pagename, host=None, extra=u'', comment=u''):
    """ Generate a line for the editlog.

        If `host` is None, it's read from request vars.
    """
    import socket
    if host is None:
        host = request.remote_addr

    try:
        hostname = socket.gethostbyaddr(host)[0]
    except socket.error:
        hostname = host

    # unicode.translate() expects ordinals as keys
    remap_chars = {ord(u'\t'): u' ', ord(u'\r'): u' ', ord(u'\n'): u' '}
    comment = comment.translate(remap_chars)
    user_id = request.user.valid and request.user.id or ''

    if self.uid_override is not None:
        user_id = ''
        hostname = self.uid_override
        host = ''

    line = u"\t".join((str(long(mtime)),  # has to be long for py 2.2.x
                       "%08d" % rev,
                       action,
                       wikiutil.quoteWikinameFS(pagename),
                       host,
                       hostname,
                       user_id,
                       extra,
                       comment,
                       )) + "\n"
    self._add(line)
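# Illustrative sketch (not part of the class above): the tab-separated line layout
# that add() produces, using literal stand-ins for the request/wikiutil values.
mtime, rev, action = 1199145600000000L, 1, 'SAVE'
pagename_fs = 'SomePage(2f)SubPage'  # stand-in for wikiutil.quoteWikinameFS(u'SomePage/SubPage')
host, hostname, user_id = '127.0.0.1', 'localhost', ''
extra, comment = u'', u'first save'

line = u"\t".join((str(long(mtime)), "%08d" % rev, action, pagename_fs,
                   host, hostname, user_id, extra, comment)) + "\n"
print repr(line)
# u'1199145600000000\t00000001\tSAVE\tSomePage(2f)SubPage\t127.0.0.1\tlocalhost\t\t\tfirst save\n'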
def copypage(self, request, rootdir, pagename):
    """ quick and dirty! """
    pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
    os.makedirs(pagedir)

    # write a "current" file with content "00000001"
    revstr = '%08d' % 1
    cf = os.path.join(pagedir, 'current')
    file(cf, 'w').write(revstr + '\n')

    # create a single revision 00000001
    revdir = os.path.join(pagedir, 'revisions')
    os.makedirs(revdir)
    tf = os.path.join(revdir, revstr)
    p = Page(request, pagename)
    text = p.get_raw_body().replace("\n", "\r\n")
    codecs.open(tf, 'wb', config.charset).write(text)

    source_dir = AttachFile.getAttachDir(request, pagename)
    if os.path.exists(source_dir):
        dest_dir = os.path.join(pagedir, "attachments")
        os.makedirs(dest_dir)
        for filename in os.listdir(source_dir):
            source_file = os.path.join(source_dir, filename)
            dest_file = os.path.join(dest_dir, filename)
            shutil.copyfile(source_file, dest_file)
def _load_group(self):
    request = self.request
    group_name = self.name

    page = Page(request, group_name)
    if page.exists():
        arena = 'pagegroups'
        key = wikiutil.quoteWikinameFS(group_name)
        cache = caching.CacheEntry(request, arena, key, scope='wiki', use_pickle=True)
        try:
            cache_mtime = cache.mtime()
            page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
            # TODO: fix up-to-date check mtime granularity problems.
            #
            # cache_mtime is float while page_mtime is integer
            # The comparison needs to be done on the lowest type of both
            if int(cache_mtime) > int(page_mtime):
                # cache is uptodate
                return cache.content()
            else:
                raise caching.CacheError
        except caching.CacheError:
            # either cache does not exist, is erroneous or not uptodate: recreate it
            members, member_groups = super(WikiGroup, self)._load_group()
            cache.update((members, member_groups))
            return members, member_groups
    else:
        raise GroupDoesNotExistError(group_name)
def _load_dict(self):
    request = self.request
    dict_name = self.name

    page = Page(request, dict_name)
    if page.exists():
        arena = "pagedicts"
        key = wikiutil.quoteWikinameFS(dict_name)
        cache = caching.CacheEntry(request, arena, key, scope="wiki", use_pickle=True)
        try:
            cache_mtime = cache.mtime()
            page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
            # TODO: fix up-to-date check mtime granularity problems.
            #
            # cache_mtime is float while page_mtime is integer
            # The comparison needs to be done on the lowest type of both
            if int(cache_mtime) > int(page_mtime):
                # cache is uptodate
                return cache.content()
            else:
                raise caching.CacheError
        except caching.CacheError:
            # either cache does not exist, is erroneous or not uptodate: recreate it
            d = super(WikiDict, self)._load_dict()
            cache.update(d)
            return d
    else:
        raise DictDoesNotExistError(dict_name)
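# Generic sketch of the mtime-based cache check shared by _load_group() and
# _load_dict() above, using plain files instead of MoinMoin's caching/Page objects.
import os, cPickle as pickle

def load_with_cache(source_fn, cache_fn, rebuild):
    try:
        if os.path.getmtime(cache_fn) > os.path.getmtime(source_fn):
            # cache is newer than the source -> use it
            return pickle.load(open(cache_fn, 'rb'))
        raise OSError('cache is stale')
    except (OSError, IOError):
        # cache missing, unreadable or stale: recreate and re-store it
        data = rebuild(source_fn)
        pickle.dump(data, open(cache_fn, 'wb'))
        return data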
def crossquery(querystring, lg):
    url = 'http://www.galoes.org/grammars/%s/%s'
    width = 20
    d = '/var/wiki'
    results = []
    qp = xapian.QueryParser()
    database = xapian.Database('/var/wiki/xapian/%s/index' % lg)
    qp.set_database(database)
    msc = MoinSearchConnection(database)
    qresults = msc.get_all_documents(query=qp.parse_query(querystring))
    for r in qresults:
        wikiname = r.data['title'][0]
        wikinamefs = wikiutil.quoteWikinameFS(wikiname)
        try:
            refv = '%s/%s/pages/%s/current' % (d, lg, wikinamefs)
            revnr = open(refv).read().strip()
            contentf = '%s/%s/pages/%s/revisions/%s' % (d, lg, wikinamefs, revnr)
            content = open(contentf).read().decode('utf8')
        except IOError:
            print "File not Found", wikinamefs
            continue
        matches = re.findall(u'%s%s%s' % ('.' + '{,%i}' % width,
                                          querystring.lower(),
                                          '.' + '{,%i}' % width),
                             content.lower())
        print matches
        results.append({'link': url % (lg, wikinamefs),
                        'totalmatches': len(matches),
                        'matches': matches,
                        'name': wikiname,
                        })
    return results
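# Standalone illustration of the match-context trick above: grab up to `width`
# characters on each side of the (lower-cased) query term.  Note the query is
# interpolated into the pattern without re.escape(), exactly as in crossquery().
import re

width = 20
querystring = u'attachment'
content = u'Use the AttachFile action to add an attachment to a page.'
pattern = u'%s%s%s' % ('.' + '{,%i}' % width, querystring.lower(), '.' + '{,%i}' % width)
print re.findall(pattern, content.lower())
# prints the snippet of surrounding text for each occurrence of the query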
def _attachment(request, pagename, filename, outputdir, **kw):
    filename = filename.encode(config.charset)
    source_dir = AttachFile.getAttachDir(request, pagename)
    source_file = os.path.join(source_dir, filename)
    dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
    dest_file = os.path.join(dest_dir, filename)
    dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename),
                                      wikiutil.url_quote(filename))
    if os.access(source_file, os.R_OK):
        if not os.access(dest_dir, os.F_OK):
            try:
                os.makedirs(dest_dir)
            except:
                script.fatal("Cannot create attachment directory '%s'" % dest_dir)
        elif not os.path.isdir(dest_dir):
            script.fatal("'%s' is not a directory" % dest_dir)

        shutil.copyfile(source_file, dest_file)
        script.log('Writing "%s"...' % dest_url)
        return dest_url
    else:
        return ""
def add(self, request, mtime, rev, action, pagename, host=None, extra=u'', comment=u''):
    """ Generate (and add) a line to the edit-log.

        If `host` is None, it's read from request vars.
    """
    if request.cfg.log_remote_addr or self.force_ip:
        if host is None:
            host = request.remote_addr or ''

        if request.cfg.log_reverse_dns_lookups:
            import socket
            try:
                hostname = socket.gethostbyaddr(host)[0]
                hostname = unicode(hostname, config.charset)
            except (socket.error, UnicodeError):
                hostname = host
        else:
            hostname = host
    else:
        host = ''
        hostname = ''

    comment = wikiutil.clean_input(comment)
    user_id = request.user.valid and request.user.id or ''

    if self.uid_override is not None:
        user_id = ''
        hostname = self.uid_override
        host = ''

    line = u"\t".join((str(long(mtime)),  # has to be long for py 2.2.x
                       "%08d" % rev,
                       action,
                       wikiutil.quoteWikinameFS(pagename),
                       host,
                       hostname,
                       user_id,
                       extra,
                       comment,
                       )) + "\n"
    self._add(line)
def makepage(rootdir, pagename, text):
    """ quick and dirty! """
    pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
    os.makedirs(pagedir)

    revstr = '%08d' % 1
    cf = os.path.join(pagedir, 'current')
    open(cf, 'w').write(revstr + '\n')

    revdir = os.path.join(pagedir, 'revisions')
    os.makedirs(revdir)
    tf = os.path.join(revdir, revstr)
    text = text.replace("\n", "\r\n")
    codecs.open(tf, 'wb', config.charset).write(text)
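# Hedged usage sketch for makepage() (assumes a MoinMoin 1.x install providing the
# wikiutil/config modules it relies on; page name and text below are made up).
import tempfile
rootdir = tempfile.mkdtemp()
makepage(rootdir, u'HelloPage', u'= Hello =\nSome text.\n')
# afterwards:
#   <rootdir>/pages/HelloPage/current             contains "00000001"
#   <rootdir>/pages/HelloPage/revisions/00000001  contains the CRLF-converted text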
def qf_convert_string(str, enc_from, enc_to):
    """ Convert filename from pre patch 78 quoting to new quoting

    The old quoting function from patch 77 can convert names ONLY from
    the old way to the new, so if you have a partially converted directory,
    as is the situation as of moin--main--1.3--patch-86, it does not work.

    The new unquoting function is backward compatible, and can unquote
    both post and pre patch 78 file names.
    """
    str = wikiutil.unquoteWikiname(str, [enc_from])
    str = wikiutil.quoteWikinameFS(str, enc_to)
    return str
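# Rough, simplified illustration (not the real wikiutil code) of the filesystem
# quoting the converters above depend on: bytes outside [a-zA-Z0-9_] are replaced
# by their hex value in parentheses, so any page name becomes a safe file name.
# The real quoteWikinameFS is more involved (configured charset, runs of unsafe
# characters), but the idea is the same.
import re

def _quote_fs_sketch(pagename, charset='utf-8'):
    raw = pagename.encode(charset)
    return re.sub(r'[^a-zA-Z0-9_]', lambda m: '(%02x)' % ord(m.group(0)), raw)

print _quote_fs_sketch(u'MoinI18n/de')   # MoinI18n(2f)de
print _quote_fs_sketch(u'My Page')       # My(20)Page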
def getAttachDir(request, pagename, create=0):
    """ Get directory where attachments for page `pagename` are stored. """
    if htdocs_access(request):
        # direct file access via webserver, from public htdocs area
        pagename = wikiutil.quoteWikinameFS(pagename)
        attach_dir = os.path.join(request.cfg.attachments['dir'], pagename, "attachments")
        if create and not os.path.isdir(attach_dir):
            filesys.makeDirs(attach_dir)
    else:
        # send file via CGI, from page storage area
        attach_dir = Page(request, pagename).getPagePath("attachments", check_create=create)
    return attach_dir
def history(self, request):
    files = self._list_files(request)
    # newest first; a key function is needed here, a cmp returning bool would sort wrongly
    files = sorted(files, key=os.path.getmtime, reverse=True)

    _usercache = {}

    for filename in files:
        result = editlog.EditLogLine(_usercache)
        result.ed_time_usecs = wikiutil.timestamp2version(os.path.getmtime(filename))
        result.rev = 0
        result.action = 'SAVE'
        result.pagename = wikiutil.quoteWikinameFS(
            os.path.splitext(os.path.basename(filename))[0].decode(request.cfg.fs_encoding))
        result.addr = ''
        result.hostname = ''
        result.userid = ''
        result.extra = None
        result.comment = ''
        yield result
def write(self, fname, deleted=False):
    """ write complete edit-log to disk """
    if self.data:
        editlog = self.data.items()
        editlog.sort()
        f = file(fname, 'wb')  # write in binary mode, so it stays exactly as we
                               # write it, even on windows.
                               # the code in MoinMoin.logfile also uses binary mode
                               # and writes \n only.
        max_rev = 0
        for key, fields in editlog:
            timestamp, rev, action, pagename, ip, hostname, userid, extra, comment = fields
            if action.startswith('ATT'):
                try:
                    fname = urllib.unquote(extra).decode('utf-8')
                except UnicodeDecodeError:
                    fname = urllib.unquote(extra).decode('iso-8859-1')
                if ('FILE', pagename, fname) in self.renames:
                    fname = self.renames[('FILE', pagename, fname)]
                extra = urllib.quote(fname.encode('utf-8'))
            if ('PAGE', pagename) in self.renames:
                pagename = self.renames[('PAGE', pagename)]
            timestamp = str(timestamp)
            if rev != 99999999:
                max_rev = max(rev, max_rev)
            revstr = '%08d' % rev
            pagename = wikiutil.quoteWikinameFS(pagename)
            fields = timestamp, revstr, action, pagename, ip, hostname, userid, extra, comment
            log_str = '\t'.join(fields) + '\n'
            f.write(log_str)
        if create_rev and not deleted:
            timestamp = str(wikiutil.timestamp2version(time.time()))
            revstr = '%08d' % (max_rev + 1)
            action = 'SAVE'
            ip = '127.0.0.1'
            hostname = 'localhost'
            userid = ''
            extra = ''
            comment = "converted to 1.6 markup"
            fields = timestamp, revstr, action, pagename, ip, hostname, userid, extra, comment
            log_str = '\t'.join(fields) + '\n'
            f.write(log_str)
        f.close()
def mainloop(self):
    """ moin-dump's main code. """

    if len(sys.argv) == 1:
        self.parser.print_help()
        sys.exit(1)

    # Prepare output directory
    outputdir = self.args[0]
    outputdir = os.path.abspath(outputdir)
    if not os.path.isdir(outputdir):
        try:
            os.mkdir(outputdir)
            _util.log("Created output directory '%s'!" % outputdir)
        except OSError:
            _util.fatal("Cannot create output directory '%s'!" % outputdir)

    # Load the configuration
    configdir = self.options.configdir
    if configdir:
        if os.path.isfile(configdir):
            configdir = os.path.dirname(configdir)
        if not os.path.isdir(configdir):
            _util.fatal("Bad path given to --config parameter")
        configdir = os.path.abspath(configdir)
        sys.path[0:0] = [configdir]
        os.chdir(configdir)

    # Dump the wiki
    request = RequestCLI(self.options.wiki_url)
    request.form = request.args = request.setup_args()

    # fix url_prefix so we get relative paths in output html
    request.cfg.url_prefix = url_prefix

    if self.options.page:
        pages = [self.options.page]
    else:
        # Get all existing pages in the wiki
        pages = list(request.rootpage.getPageList(user=''))
        pages.sort()

    wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)

    errfile = os.path.join(outputdir, 'error.log')
    errlog = open(errfile, 'w')
    errcnt = 0

    page_front_page = wikiutil.getSysPage(request, 'FrontPage').page_name
    page_title_index = wikiutil.getSysPage(request, 'TitleIndex').page_name
    page_word_index = wikiutil.getSysPage(request, 'WordIndex').page_name

    navibar_html = ''
    for p in [page_front_page, page_title_index, page_word_index]:
        navibar_html += ' [<a href="%s">%s</a>]' % (wikiutil.quoteWikinameFS(p), wikiutil.escape(p))

    for pagename in pages:
        file = wikiutil.quoteWikinameURL(pagename)  # we have the same name in URL and FS
        _util.log('Writing "%s"...' % file)
        try:
            pagehtml = ''
            page = Page.Page(request, pagename)
            try:
                request.reset()
                out = StringIO.StringIO()
                request.redirect(out)
                page.send_page(request, count_hit=0, content_only=1)
                pagehtml = out.getvalue()
                request.redirect()
            except:
                errcnt = errcnt + 1
                print >> sys.stderr, "*** Caught exception while writing page!"
                print >> errlog, "~" * 78
                print >> errlog, file  # page filename
                import traceback
                traceback.print_exc(None, errlog)
        finally:
            timestamp = time.strftime("%Y-%m-%d %H:%M")
            filepath = os.path.join(outputdir, file)
            fileout = codecs.open(filepath, 'w', config.charset)
            fileout.write(page_template % {
                'charset': config.charset,
                'pagename': pagename,
                'pagehtml': pagehtml,
                'logo_html': logo_html,
                'navibar_html': navibar_html,
                'timestamp': timestamp,
                'theme': request.cfg.theme_default,
            })
            fileout.close()

    # copy FrontPage to "index.html"
    indexpage = page_front_page
    if self.options.page:
        indexpage = self.options.page
    shutil.copyfile(
        os.path.join(outputdir, wikiutil.quoteWikinameFS(indexpage) + HTML_SUFFIX),
        os.path.join(outputdir, 'index' + HTML_SUFFIX))

    errlog.close()
    if errcnt:
        print >> sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
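# Side note (illustrative, not from the dump script): the rebinding above replaces
# wikiutil.quoteWikinameURL so every link the formatter emits points at a local
# "<quoted-name>.html" file; the default argument captures the original FS-quoting
# function before the rebind.  A self-contained sketch of the trick:
def quote_fs(name):
    return name.replace('/', '(2f)')   # stand-in for wikiutil.quoteWikinameFS

HTML_SUFFIX = '.html'
quote_url = lambda pagename, qfn=quote_fs: qfn(pagename) + HTML_SUFFIX

print quote_url('FrontPage')       # FrontPage.html
print quote_url('Some/SubPage')    # Some(2f)SubPage.html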
def list_pages(self, request):
    fnfilter = lambda x: wikiutil.quoteWikinameFS(
        os.path.splitext(os.path.basename(x))[0].decode(request.cfg.fs_encoding))
    return map(fnfilter, self._list_files(request))
def qf_convert_string(str, enc_from, enc_to):
    str = unquoteWikiname12(str, enc_from)
    str = wikiutil.quoteWikinameFS(str, enc_to)
    return str
#!/usr/bin/env python
""" links the .po files to the current versions in the wiki.
    should be done before updating them with new strings
"""

import sys, os

sys.path.insert(0, '../..')
from MoinMoin import wikiutil

langs = 'da de es fi fr hu it ja ko nb nl pl ru sr vi zh zh-tw'.split()
broken_langs = 'hr pt sv'.split()
nonwiki_langs = 'he en'

data_dir = '/org/de.wikiwikiweb.moinmaster/data'

for lang in langs + broken_langs:
    langdir = os.path.join(data_dir, 'pages', wikiutil.quoteWikinameFS('MoinI18n/%s' % lang))
    pofn = lang.replace('-', '_') + '.po'
    if lang in broken_langs:
        langdir += '(2d)FIXME'
        pofn += '_'
    currentfn = os.path.join(langdir, 'current')
    current = open(currentfn, 'r').read().strip()
    wikifn = os.path.join(langdir, 'revisions', current)
    if os.path.exists(pofn):
        os.remove(pofn)
    os.symlink(wikifn, pofn)
    print "ln -s %s %s" % (wikifn, pofn)
""" get some pages from another wiki """ import sys, os, xmlrpclib, codecs sys.path.insert(0, "..") from MoinMoin import wikiutil s = xmlrpclib.ServerProxy("http://wainu.ii.uned.es/wainuki/?action=xmlrpc2") index = open("index") for l in index: d = l.split('||') pn = d[3].strip() pd = s.getPage(pn) dn = wikiutil.quoteWikinameFS(pn.decode("utf-8")) os.mkdir(dn) cn = os.path.join(dn,'current') f = open(cn,'w') f.write('00000001\n') f.close() dn2 = os.path.join(dn, 'revisions') os.mkdir(dn2) fn = os.path.join(dn2,'00000001') f = codecs.open(fn,"wb","utf-8") pd = pd.replace('\n','\r\n') f.write(pd) f.close()
p.feed(sys.stdin.read())
p.close()
#pprint(p.hier)

basedir = os.path.join(sys.argv[1], 'data/pages')
cat = ""
if len(sys.argv) > 2:
    cat = sys.argv[2]

for start in p.glinks:
    for end in p.glinks[start]:
        p.graph.edges.add(p.nameidmap[start], p.nameidmap[end])

for node, in p.graph.nodes:
    nodedir = os.path.join(basedir, quoteWikinameFS(node))
    curf = os.path.join(nodedir, 'current')
    nro = 1
    revdir = os.path.join(nodedir, 'revisions')
    if not os.path.exists(nodedir):
        os.mkdir(nodedir)
    if not os.path.exists(revdir):
        os.mkdir(revdir)
    if os.path.exists(curf):
        nro = int(file(curf).read()[:-1]) + 1
        os.unlink(curf)
    curver = "%08d" % nro
    file(curf, 'w').write(curver + '\n')
    out = file(os.path.join(revdir, curver), 'w')
    for par, chi in p.graph.edges:
class PluginScript(script.MoinScript):
    """\
Purpose:
========
This tool allows you to dump MoinMoin wiki pages to static HTML files.

Detailed Instructions:
======================
General syntax: moin [options] export dump [dump-options]

[options] usually should be:
    --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/

[dump-options] see below:
    0. You must run this script as owner of the wiki files, usually this is the
       web server user.

    1. To dump all the pages on the wiki to the directory '/mywiki'
       moin ... export dump --target-dir=/mywiki

    2. To dump all the pages readable by 'JohnSmith' on the wiki to the
       directory '/mywiki'
       moin ... export dump --target-dir=/mywiki --username JohnSmith
"""

    def __init__(self, argv=None, def_values=None):
        script.MoinScript.__init__(self, argv, def_values)
        self.parser.add_option(
            "-t", "--target-dir", dest="target_dir",
            help="Write html dump to DIRECTORY")
        self.parser.add_option(
            "-u", "--username", dest="dump_user",
            help="User the dump will be performed as (for ACL checks, etc)")

    def mainloop(self):
        """ moin-dump's main code. """

        # Prepare output directory
        if not self.options.target_dir:
            script.fatal("you must use --target-dir=/your/output/path to specify the directory we write the html files to")
        outputdir = os.path.abspath(self.options.target_dir)
        try:
            os.mkdir(outputdir)
            script.log("Created output directory '%s'!" % outputdir)
        except OSError, err:
            if err.errno != errno.EEXIST:
                script.fatal("Cannot create output directory '%s'!" % outputdir)

        # Insert config dir or the current directory to the start of the path.
        config_dir = self.options.config_dir
        if config_dir and os.path.isfile(config_dir):
            config_dir = os.path.dirname(config_dir)
        if config_dir and not os.path.isdir(config_dir):
            script.fatal("bad path given to --config-dir option")
        sys.path.insert(0, os.path.abspath(config_dir or os.curdir))

        self.init_request()
        request = self.request

        # fix script_root so we get relative paths in output html
        request.script_root = url_prefix_static

        # use this user for permissions checks
        request.user = user.User(request, name=self.options.dump_user)

        pages = request.rootpage.getPageList(user='')  # get list of all pages in wiki
        pages.sort()
        if self.options.page:  # did user request a particular page or group of pages?
            try:
                namematch = re.compile(self.options.page)
                pages = [page for page in pages if namematch.match(page)]
                if not pages:
                    pages = [self.options.page]
            except:
                pages = [self.options.page]

        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)

        AttachFile.getAttachUrl = lambda pagename, filename, request, **kw: _attachment(
            request, pagename, filename, outputdir, **kw)

        errfile = os.path.join(outputdir, 'error.log')
        errlog = open(errfile, 'w')
        errcnt = 0

        page_front_page = wikiutil.getLocalizedPage(request, request.cfg.page_front_page).page_name
        page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
        page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name

        navibar_html = ''
        for p in [page_front_page, page_title_index, page_word_index]:
            navibar_html += '[<a href="%s">%s</a>] ' % (wikiutil.quoteWikinameURL(p), wikiutil.escape(p))

        urlbase = request.url  # save wiki base url

        for pagename in pages:
            # we have the same name in URL and FS
            file = wikiutil.quoteWikinameURL(pagename)
            script.log('Writing "%s"...' % file)
            try:
                pagehtml = ''
                request.url = urlbase + pagename  # add current pagename to url base
                page = Page.Page(request, pagename)
                request.page = page
                try:
                    request.reset()
                    pagehtml = request.redirectedOutput(page.send_page, count_hit=0, content_only=1)
                except:
                    errcnt = errcnt + 1
                    print >> sys.stderr, "*** Caught exception while writing page!"
                    print >> errlog, "~" * 78
                    print >> errlog, file  # page filename
                    import traceback
                    traceback.print_exc(None, errlog)
            finally:
                timestamp = time.strftime("%Y-%m-%d %H:%M")
                filepath = os.path.join(outputdir, file)
                fileout = codecs.open(filepath, 'w', config.charset)
                fileout.write(page_template % {
                    'charset': config.charset,
                    'pagename': pagename,
                    'pagehtml': pagehtml,
                    'logo_html': logo_html,
                    'navibar_html': navibar_html,
                    'timestamp': timestamp,
                    'theme': request.cfg.theme_default,
                })
                fileout.close()

        # copy FrontPage to "index.html"
        indexpage = page_front_page
        if self.options.page:
            # index page has limited use when dumping specific pages, but create one anyway
            indexpage = pages[0]
        shutil.copyfile(
            os.path.join(outputdir, wikiutil.quoteWikinameFS(indexpage) + HTML_SUFFIX),
            os.path.join(outputdir, 'index' + HTML_SUFFIX))

        errlog.close()
        if errcnt:
            print >> sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
def execute(macro, text, args_re=re.compile(_args_re_pattern),
            title_re=re.compile(_title_re, re.M), called_by_toc=0):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text,))

    # prepare including page
    result = []
    print_mode = macro.form.has_key('action') and macro.form['action'][0] == "print"
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(request, this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass  # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if this_page._macroInclude_pagelist.has_key(inc_name):
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name,))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error, e:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            newbody = []
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if called_by_toc:
            result.append(inc_page.get_raw_body())
            continue

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name

        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title(request)
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                import sha
                from MoinMoin import config
                # this heading id might produce duplicate ids,
                # if the same page is included multiple times
                # Encode stuff we feed into the sha module.
                pntt = (inc_name + heading).encode(config.charset)
                hid = "head-" + sha.new(pntt).hexdigest()
                request._page_headings.setdefault(pntt, 0)
                request._page_headings[pntt] += 1
                if request._page_headings[pntt] > 1:
                    hid += '-%d' % (request._page_headings[pntt],)
                result.append(
                    #macro.formatter.heading(1, level, hid,
                    #                        icons=edit_icon.replace('<img ', '<img align="right" ')) +
                    macro.formatter.heading(1, level, hid) +
                    inc_page.link_to(request, heading, css_class="include-heading-link") +
                    macro.formatter.heading(0, level)
                )

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            cid = request.makeUniqueID("Include_%s" % wikiutil.quoteWikinameFS(inc_page.page_name))
            inc_page.send_page(request, content_only=1, content_id=cid)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                '<div class="include-link">',
                inc_page.link_to(request, '[%s]' % (inc_name,), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'),),
                                 css_class="include-edit-link",
                                 querystr={'action': 'edit', 'backto': request._Include_backto}),
                '</div>',
            ])
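# Small standalone sketch (not the macro itself) of the duplicate-heading-id
# scheme used above: hash page name + heading, append a counter on repeats.
import sha
_seen = {}

def heading_id(pagename, heading):
    pntt = (pagename + heading).encode('utf-8')
    hid = "head-" + sha.new(pntt).hexdigest()
    _seen[pntt] = _seen.get(pntt, 0) + 1
    if _seen[pntt] > 1:
        hid += '-%d' % _seen[pntt]
    return hid

print heading_id(u'SomePage', u'Intro')
print heading_id(u'SomePage', u'Intro')   # second include of the same heading gets ...-2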
def rawGetPage(srcurl, pagename, encoding="iso8859-1"):
    url = srcurl % wikiutil.quoteWikinameFS(pagename.encode(encoding))
    pagedata = urllib.urlopen(url).read()
    return unicode(pagedata, encoding).encode("utf-8")
def unquote(pagename):
    from MoinMoin import wikiutil
    return wikiutil.quoteWikinameFS(pagename.decode('utf8'))