def run(self, key, value, remove, query):
    if not ((key and value) or (key and remove)) or (key and value and remove):
        fatal("You need to either specify a proper key/value pair or "
              "only a key you want to delete (with -r set).")
    if not remove:
        try:
            value = literal_eval(value)
        except ValueError:
            fatal("You need to specify a valid Python literal as the argument")
    if query:
        qp = app.storage.query_parser([NAME_EXACT, ])
        q = qp.parse(query)
    else:
        q = Every()
    for current_rev in app.storage.search(q, limit=None):
        name = current_rev.meta[NAME]
        newmeta = dict(current_rev.meta)
        if remove:
            newmeta.pop(key)
            print "Processing {0!r}, removing {1}.".format(name, key)
        else:
            newmeta[key] = value
            print "Processing {0!r}, setting {1}={2!r}.".format(name, key, value)
        current_rev.item.store_revision(newmeta, current_rev.data)
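# A minimal standalone sketch (not part of the script; _valid_args is a
# hypothetical helper) of the argument check in run() above: the valid
# combinations are key+value or key+remove, never all three and never a
# key alone.
def _valid_args(key, value, remove):
    return bool(((key and value) or (key and remove))
                and not (key and value and remove))

assert _valid_args('k', 'v', False)       # set key to value
assert _valid_args('k', None, True)       # remove key
assert not _valid_args('k', 'v', True)    # value and -r together: rejected
assert not _valid_args('k', None, False)  # key alone: rejected
assert not _valid_args(None, 'v', False)  # value without key: rejected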
def mainloop(self):
    load = self.options.load
    save = self.options.save
    xml_file = self.options.xml_file
    if load == save:  # either both True or both False
        fatal("You need to give either --load or --save!")
    if not xml_file:
        if load:
            xml_file = sys.stdin
        elif save:
            xml_file = sys.stdout

    self.init_request()
    request = self.request
    init_unprotected_backends(request)
    storage = flaskg.unprotected_storage

    ndays = self.options.ndays
    exceptndays = self.options.exceptndays
    nhours = self.options.nhours
    exceptnhours = self.options.exceptnhours
    now = time.time()
    sincetime = 0
    sincetime_invert = False
    if ndays:
        sincetime = now - ndays * 24 * 3600
    elif nhours:
        sincetime = now - nhours * 3600
    elif exceptndays:
        sincetime = now - exceptndays * 24 * 3600
        sincetime_invert = True
    elif exceptnhours:
        sincetime = now - exceptnhours * 3600
        sincetime_invert = True

    nrevs = self.options.nrevs
    nrevs_invert = False
    exceptnrevs = self.options.exceptnrevs
    if exceptnrevs:
        nrevs = exceptnrevs
        nrevs_invert = True

    if load:
        unserialize(storage, xml_file)
    elif save:
        if nrevs:
            serialize(storage, xml_file, NLastRevs, nrevs, nrevs_invert)
        elif sincetime:
            serialize(storage, xml_file, SinceTime, sincetime, sincetime_invert)
        else:
            serialize(storage, xml_file)
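# Hedged illustration of the cutoff arithmetic above: each --ndays/--nhours
# style option is turned into a number of seconds subtracted from "now", and
# the except* variants only flip the invert flag.
import time

now = time.time()
week_cutoff = now - 7 * 24 * 3600  # --ndays=7: revisions newer than a week
hour_cutoff = now - 12 * 3600      # --nhours=12: revisions newer than 12 hours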
def mainloop(self):
    """ moin-dump's main code. """
    # Prepare output directory
    if not self.options.target_dir:
        script.fatal("you must use --target-dir=/your/output/path to specify the directory we write the html files to")
    outputdir = os.path.abspath(self.options.target_dir)
    try:
        os.mkdir(outputdir)
        script.log("Created output directory '%s'!" % outputdir)
    except OSError, err:
        if err.errno != errno.EEXIST:
            script.fatal("Cannot create output directory '%s'!" % outputdir)
def mainloop(self):
    try:
        import mailimportconf
    except ImportError:
        fatal("Could not find the file mailimportconf.py. Maybe you want to use the --config-dir=... option?")

    secret = mailimportconf.mail_import_secret
    url = mailimportconf.mail_import_url

    s = xmlrpclib.ServerProxy(url)
    result = s.ProcessMail(secret, xmlrpclib.Binary(input.read()))
    if result != "OK":
        print >> sys.stderr, result
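# A minimal sketch of the XML-RPC client pattern used by the mainloop()
# above; the URL is a placeholder, and ProcessMail is the server-side method
# the snippet calls with the secret from mailimportconf.
import sys
import xmlrpclib

server = xmlrpclib.ServerProxy("http://wiki.example.org/?action=xmlrpc2")  # placeholder URL
payload = xmlrpclib.Binary(sys.stdin.read())
# result = server.ProcessMail(secret, payload)  # secret comes from mailimportconf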
def mainloop(self):
    """ moin-package's main code. """
    # Initialize request
    self.init_request()
    request = self.request
    _ = self.request.getText

    # Check our command line args
    if self.options.pages and self.options.search:
        script.fatal(_("Options --pages and --search are mutually exclusive!"))
    elif not self.options.output:
        script.fatal(_("You must specify an output file!"))
    elif not self.options.pages and not self.options.search:
        script.log(_("No pages specified using --pages or --search, assuming full package."))

    include_attachments = self.options.attachment or False
    if include_attachments:
        script.log(_("All attachments included into the package."))

    # Sanity checks
    if os.path.exists(self.options.output):
        script.fatal(_("Output file already exists! Cowardly refusing to continue!"))

    # Check for user
    if self.options.package_user:
        request.user = user.User(request, name=self.options.package_user)

    # Import PackagePages here, as we now have an initialized request.
    from MoinMoin.action.PackagePages import PackagePages

    # Perform actual packaging.
    package = PackagePages(request.rootpage.page_name, request)
    packageoutput = open(self.options.output, "wb")
    if self.options.search:
        packagedata = package.collectpackage(
            package.searchpackage(request, self.options.search),
            packageoutput,
            include_attachments=include_attachments)
    elif self.options.pages:
        packagedata = package.collectpackage(
            self.options.pages.split(","),
            packageoutput,
            include_attachments=include_attachments)
    else:
        packagedata = package.collectpackage(
            request.rootpage.getPageList(
                include_underlay=False,
                filter=lambda name: not wikiutil.isSystemPage(request, name)),
            packageoutput,
            include_attachments=include_attachments)
    if packagedata:
        script.fatal(packagedata)
def mainloop(self):
    try:
        import remotescriptconf as conf
    except ImportError:
        fatal("Could not find the file remotescriptconf.py. Maybe you want to use the config param?")

    secret = conf.remotescript_secret
    url = conf.remotescript_url

    print url, secret, self.argv
    s = xmlrpclib.ServerProxy(url)
    # TODO handle stdin
    # xmlrpclib.Binary(sys.stdin.read())
    result = s.RemoteScript(secret, self.argv)
    # TODO handle stdout, stderr
    if result != "OK":
        print >> sys.stderr, result
def mainloop(self):
    self.init_request()
    request = self.request
    init_unprotected_backends(request)
    cfg = app.cfg
    try:
        data_dir_old = cfg.data_dir_old
        user_dir_old = cfg.user_dir_old
    except AttributeError:
        fatal("""
The backend migration did not find your old wiki data.

Please, configure in your wiki config:

data_dir_old = '.../data'  # must be the path of your old data directory
                           # (it must contain the pages/ subdirectory)
user_dir_old = '.../data/user'  # must be the path of your old user profile directory
                                # or None (no conversion of user profiles)
""")

    page_backend = fs19.FSPageBackend(data_dir_old)
    dest_content = flaskg.unprotected_storage.get_backend(cfg.ns_content)
    sys.stdout.write("Starting backend migration.\nConverting data.\n")
    content_fails = dest_content.clone(page_backend, self.options.verbose)[2]
    if self.options.show_failed and len(content_fails):
        sys.stdout.write("\nFailed report\n-------------\n")
        for name in content_fails.iterkeys():
            sys.stdout.write("%r: %s\n" % (name, content_fails[name]))
    sys.stdout.write("Content migration finished!\n")

    if user_dir_old:
        user_backend = fs19.FSUserBackend(user_dir_old)
        dest_userprofile = flaskg.unprotected_storage.get_backend(cfg.ns_user_profile)
        sys.stdout.write("Converting users.\n")
        user_fails = dest_userprofile.clone(user_backend, self.options.verbose)[2]
        if self.options.show_failed and len(user_fails):
            sys.stdout.write("\nFailed report\n-------------\n")
            for name in user_fails.iterkeys():
                sys.stdout.write("%r: %s\n" % (name, user_fails[name]))
        sys.stdout.write("User profile migration finished!\n")
def _attachment(request, pagename, filename, outputdir, **kw):
    filename = filename.encode(config.charset)
    source_dir = AttachFile.getAttachDir(request, pagename)
    source_file = os.path.join(source_dir, filename)
    dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
    dest_file = os.path.join(dest_dir, filename)
    dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), wikiutil.url_quote(filename))
    if os.access(source_file, os.R_OK):
        if not os.access(dest_dir, os.F_OK):
            try:
                os.makedirs(dest_dir)
            except:
                script.fatal("Cannot create attachment directory '%s'" % dest_dir)
        elif not os.path.isdir(dest_dir):
            script.fatal("'%s' is not a directory" % dest_dir)

        shutil.copyfile(source_file, dest_file)
        script.log('Writing "%s"...' % dest_url)
        return dest_url
    else:
        return ""
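# A narrower variant (a sketch, not from the original; _ensure_dir is a
# hypothetical helper) of the directory creation in _attachment() above:
# only ignore "already exists" instead of swallowing every error with a
# bare except.
import errno
import os

def _ensure_dir(path):
    try:
        os.makedirs(path)
    except OSError, err:
        if err.errno != errno.EEXIST:
            raise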
def run(self, key, value, remove, query):
    storage = app.unprotected_storage
    if not ((key and value) or (key and remove)) or (key and value and remove):
        fatal("You need to either specify a proper key/value pair or "
              "only a key you want to delete (with -r set).")
    if query:
        qp = storage.query_parser([NAME_EXACT, ])
        q = qp.parse(query)
    else:
        q = Every()
    results = storage.search(q, limit=None)
    for result in results:
        item_name = result[NAME]
        item = storage.get_item(item_name)
        try:
            last_rev = item.get_revision(-1)
        except NoSuchRevisionError:
            last_rev = None
        next_rev = item.create_revision(item.next_revno)
        if last_rev:
            # Copy data.
            copyfileobj(last_rev, next_rev)
            # Copy metadata:
            for k, v in last_rev.iteritems():
                if remove and k == key:
                    continue
                next_rev[k] = v
        if not remove:
            # Set or overwrite given metadata key with text
            next_rev[key] = literal_eval(value)
            print "Processing {0!r}, setting {1}={2!r}.".format(item_name, key, value)
        else:
            print "Processing {0!r}, removing {1}.".format(item_name, key)
        item.commit()
def mainloop(self):
    self.init_request()
    request = self.request
    init_unprotected_backends(request)
    storage = flaskg.unprotected_storage

    key = self.options.key
    val = self.options.value
    remove = self.options.remove
    if not ((key and val) or (key and remove)) or (key and val and remove):
        fatal("You need to either specify a proper key/value pair or "
              "only a key you want to delete (with -r set).")

    pattern = self.options.pattern
    query = term.NameRE(compile(pattern))

    for item in storage.search_items(query):
        try:
            last_rev = item.get_revision(-1)
        except NoSuchRevisionError:
            last_rev = None
        next_rev = item.create_revision(item.next_revno)
        if last_rev:
            # Copy data.
            copyfileobj(last_rev, next_rev)
            # Copy metadata:
            for k, v in last_rev.iteritems():
                if remove and k == key:
                    continue
                next_rev[k] = v
        if not remove:
            # Set or overwrite given metadata key with value
            # (note: eval() executes arbitrary code; the newer run()
            # variants above use the safer ast.literal_eval instead)
            next_rev[key] = eval(val)
        item.commit()
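# Hedged note on the eval() call above: ast.literal_eval, used by the newer
# run() variants earlier in this section, only accepts Python literals and
# rejects everything else.
from ast import literal_eval

literal_eval("{'mimetype': 'text/plain'}")  # ok: a plain literal
# literal_eval("__import__('os')")          # raises ValueError: not a literal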
def mainloop(self):
    self.init_request()
    request = self.request
    request.user.may = IAmRoot()

    if not self.options.page:
        fatal('You must specify a wiki page name (--page=Page)!')
    if not self.options.file:
        fatal('You must specify a FILE to read from (--file=FILE)!')

    try:
        fileObj = open(self.options.file, 'rb')
    except IOError, err:
        fatal(str(err))
def mainloop(self):
    """ moin-dump's main code. """
    outputdir = '/'
    date_str = ''
    with open(self._input_file, 'r') as fd:
        for line in fd:
            if line.startswith("pubdate ="):
                date_str = line[10:].strip()
                break

    # Insert config dir or the current directory to the start of the path.
    config_dir = self.options.config_dir
    if config_dir and os.path.isfile(config_dir):
        config_dir = os.path.dirname(config_dir)
    if config_dir and not os.path.isdir(config_dir):
        script.fatal("bad path given to --config-dir option")
    sys.path.insert(0, os.path.abspath(config_dir or os.curdir))

    self.init_request()
    request = self.request

    # fix script_root so we get relative paths in output html
    request.script_root = url_prefix_static

    # use this user for permissions checks
    request.user = user.User(request, name=self.options.dump_user)

    pages = request.rootpage.getPageList(user='')  # get list of all pages in wiki
    pages.sort()
    if self._input_file:  # did user request a particular page or group of pages?
        try:
            namematch = re.compile(self._input_file)
            pages = [page for page in pages if namematch.match(page)]
            if not pages:
                pages = [self._input_file]
        except:
            pages = [self._input_file]

    wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)
    AttachFile.getAttachUrl = lambda pagename, filename, request, **kw: _attachment(request, pagename, filename, outputdir, **kw)

    errlog = sys.stderr
    errcnt = 0

    page_front_page = wikiutil.getLocalizedPage(request, request.cfg.page_front_page).page_name
    page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
    page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name

    navibar_html = ''
    urlbase = request.url  # save wiki base url

    for pagename in pages:
        # we have the same name in URL and FS
        script.log('Writing "%s"...' % self._output_file)
        try:
            pagehtml = ''
            request.url = urlbase + pagename  # add current pagename to url base
            page = FSPage(request, pagename, formatter=CustomHtmlFormatter(request, store_pagelinks=1))
            request.page = page
            try:
                request.reset()
                pagehtml = request.redirectedOutput(page.send_page, count_hit=0, content_only=1, do_cache=0)
            except:
                errcnt = errcnt + 1
                print >> sys.stderr, "*** Caught exception while writing page!"
                print >> errlog, "~" * 78
                print >> errlog, pagename  # page filename
                import traceback
                traceback.print_exc(None, errlog)
        finally:
            timestamp = time.strftime("%Y-%m-%d %H:%M")
            filepath = self._output_file
            fileout = codecs.open(filepath, 'w', config.charset)
            fileout.write(page_template % {
                'charset': config.charset,
                'pagename': pagename,
                'pagehtml': pagehtml,
                'logo_html': logo_html,
                'navibar_html': navibar_html,
                'timestamp': timestamp,
                'theme': request.cfg.theme_default,
                'date_str': date_str,
            })
            fileout.close()
class PluginScript(script.MoinScript):
    """\
Purpose:
========
This tool allows you to dump MoinMoin wiki pages to static HTML files.

Detailed Instructions:
======================
General syntax: moin [options] export dump [dump-options]

[options] usually should be:
    --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/

[dump-options] see below:
    0. You must run this script as owner of the wiki files, usually this is
       the web server user.

    1. To dump all the pages on the wiki to the directory '/mywiki'
       moin ... export dump --target-dir=/mywiki

    2. To dump all the pages readable by 'JohnSmith' on the wiki to the
       directory '/mywiki'
       moin ... export dump --target-dir=/mywiki --username JohnSmith
"""

    def __init__(self, argv=None, def_values=None):
        script.MoinScript.__init__(self, argv, def_values)
        self.parser.add_option(
            "-t", "--target-dir", dest="target_dir",
            help="Write html dump to DIRECTORY")
        self.parser.add_option(
            "-u", "--username", dest="dump_user",
            help="User the dump will be performed as (for ACL checks, etc)")

    def mainloop(self):
        """ moin-dump's main code. """
        # Prepare output directory
        if not self.options.target_dir:
            script.fatal("you must use --target-dir=/your/output/path to specify the directory we write the html files to")
        outputdir = os.path.abspath(self.options.target_dir)
        try:
            os.mkdir(outputdir)
            script.log("Created output directory '%s'!" % outputdir)
        except OSError, err:
            if err.errno != errno.EEXIST:
                script.fatal("Cannot create output directory '%s'!" % outputdir)

        # Insert config dir or the current directory to the start of the path.
        config_dir = self.options.config_dir
        if config_dir and os.path.isfile(config_dir):
            config_dir = os.path.dirname(config_dir)
        if config_dir and not os.path.isdir(config_dir):
            script.fatal("bad path given to --config-dir option")
        sys.path.insert(0, os.path.abspath(config_dir or os.curdir))

        self.init_request()
        request = self.request

        # fix script_root so we get relative paths in output html
        request.script_root = url_prefix_static

        # use this user for permissions checks
        request.user = user.User(request, name=self.options.dump_user)

        pages = request.rootpage.getPageList(user='')  # get list of all pages in wiki
        pages.sort()
        if self.options.page:  # did user request a particular page or group of pages?
            try:
                namematch = re.compile(self.options.page)
                pages = [page for page in pages if namematch.match(page)]
                if not pages:
                    pages = [self.options.page]
            except:
                pages = [self.options.page]

        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)
        AttachFile.getAttachUrl = lambda pagename, filename, request, **kw: _attachment(request, pagename, filename, outputdir, **kw)

        errfile = os.path.join(outputdir, 'error.log')
        errlog = open(errfile, 'w')
        errcnt = 0

        page_front_page = wikiutil.getLocalizedPage(request, request.cfg.page_front_page).page_name
        page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
        page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name

        navibar_html = ''
        for p in [page_front_page, page_title_index, page_word_index]:
            navibar_html += '[<a href="%s">%s</a>] ' % (wikiutil.quoteWikinameURL(p), wikiutil.escape(p))

        urlbase = request.url  # save wiki base url
        for pagename in pages:
            # we have the same name in URL and FS
            file = wikiutil.quoteWikinameURL(pagename)
            script.log('Writing "%s"...' % file)
            try:
                pagehtml = ''
                request.url = urlbase + pagename  # add current pagename to url base
                page = Page.Page(request, pagename)
                request.page = page
                try:
                    request.reset()
                    pagehtml = request.redirectedOutput(page.send_page, count_hit=0, content_only=1)
                except:
                    errcnt = errcnt + 1
                    print >> sys.stderr, "*** Caught exception while writing page!"
                    print >> errlog, "~" * 78
                    print >> errlog, file  # page filename
                    import traceback
                    traceback.print_exc(None, errlog)
            finally:
                timestamp = time.strftime("%Y-%m-%d %H:%M")
                filepath = os.path.join(outputdir, file)
                fileout = codecs.open(filepath, 'w', config.charset)
                fileout.write(page_template % {
                    'charset': config.charset,
                    'pagename': pagename,
                    'pagehtml': pagehtml,
                    'logo_html': logo_html,
                    'navibar_html': navibar_html,
                    'timestamp': timestamp,
                    'theme': request.cfg.theme_default,
                })
                fileout.close()

        # copy FrontPage to "index.html"
        indexpage = page_front_page
        if self.options.page:
            indexpage = pages[0]  # index page has limited use when dumping specific pages, but create one anyway
        shutil.copyfile(
            os.path.join(outputdir, wikiutil.quoteWikinameFS(indexpage) + HTML_SUFFIX),
            os.path.join(outputdir, 'index' + HTML_SUFFIX))

        errlog.close()
        if errcnt:
            print >> sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
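# Example invocations, following the docstring of PluginScript above (paths,
# wiki URL and user name are placeholders):
#
#   moin --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/ \
#        export dump --target-dir=/mywiki
#   moin --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/ \
#        export dump --target-dir=/mywiki --username JohnSmith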