def treat(self): page = wikipedia.Page(self.site, self.your_page) if page.exists(): wikipedia.output( u"\nWikitable on \03{lightpurple}%s\03{default} will be completed with:\n" % self.your_page ) text = page.get() newtext = self.newraw() wikipedia.output(newtext) choice = wikipedia.inputChoice(u"Do you want to add these on wikitable?", ["Yes", "No"], ["y", "N"], "N") text = text[:-3] + newtext summ = wikipedia.translate(self.site, summary_update) if choice == "y": try: page.put(u"".join(text), summ) except: wikipedia.output(u"Impossible to edit. It may be an edit conflict... Skipping...") else: wikipedia.output(u"\nWikitable on \03{lightpurple}%s\03{default} will be created with:\n" % self.your_page) newtext = self.newtable() + self.newraw() wikipedia.output(newtext) summ = wikipedia.translate(self.site, summary_creation) choice = wikipedia.inputChoice(u"Do you want to accept this page creation?", ["Yes", "No"], ["y", "N"], "N") if choice == "y": try: page.put(newtext, summ) except wikipedia.LockedPage: wikipedia.output(u"Page %s is locked; skipping." % title) except wikipedia.EditConflict: wikipedia.output(u"Skipping %s because of edit conflict" % title) except wikipedia.SpamfilterError, error: wikipedia.output(u"Cannot change %s because of spam blacklist entry %s" % (title, error.url))
def treat(self): page = pywikibot.Page(self.site, self.your_page) if page.exists(): pywikibot.output(u'\nWikitable on \03{lightpurple}%s\03{default} will be completed with:\n' % self.your_page ) text = page.get() newtext = self.newraw() pywikibot.output(newtext) choice = pywikibot.inputChoice(u'Do you want to add these on wikitable?', ['Yes', 'No'], ['y', 'N'], 'N') text = text[:-3] + newtext summ = pywikibot.translate(self.site, summary_update) if choice == 'y': try: page.put(u''.join(text), summ) except: pywikibot.output(u'Impossible to edit. It may be an edit conflict... Skipping...') else: pywikibot.output(u'\nWikitable on \03{lightpurple}%s\03{default} will be created with:\n' % self.your_page ) newtext = self.newtable()+self.newraw() pywikibot.output(newtext) summ = pywikibot.translate(self.site, summary_creation) choice = pywikibot.inputChoice(u'Do you want to accept this page creation?', ['Yes', 'No'], ['y', 'N'], 'N') if choice == 'y': try: page.put(newtext, summ) except pywikibot.LockedPage: pywikibot.output(u"Page %s is locked; skipping." % title) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % title) except pywikibot.SpamfilterError, error: pywikibot.output(u'Cannot change %s because of spam blacklist entry %s' % (title, error.url))
def useHashGenerator(self):
    """Yield [local_image, commons_image] pairs scraped from the toolserver
    duplicate-image report, asking the user to confirm each pair visually.
    """
    # http://toolserver.org/~multichill/nowcommons.php?language=it&page=2&filter=
    lang = self.site.lang
    num_page = 0
    word_to_skip_translated = pywikibot.translate(self.site, word_to_skip)
    images_processed = list()
    while True:  # idiom fix: was "while 1"
        url = ('http://toolserver.org/~multichill/nowcommons.php'
               '?language=%s&page=%s&filter=' % (lang, num_page))
        HTML_text = self.site.getUrl(url, no_hostname=True)
        reg = r'<[Aa] href="(?P<urllocal>.*?)">(?P<imagelocal>.*?)</[Aa]> +?</td><td>\n\s*?'
        reg += r'<[Aa] href="(?P<urlcommons>http://commons.wikimedia.org/.*?)">Image:(?P<imagecommons>.*?)</[Aa]> +?</td><td>'
        regex = re.compile(reg, re.UNICODE)
        found_something = False
        change_page = True
        for x in regex.finditer(HTML_text):
            found_something = True
            image_local = x.group('imagelocal')
            image_commons = x.group('imagecommons')
            if image_local in images_processed:
                continue
            change_page = False
            images_processed.append(image_local)
            # Skip images that have something in the title (useful for it.wiki)
            image_to_skip = False
            for word in word_to_skip_translated:
                if word.lower() in image_local.lower():
                    image_to_skip = True
            if image_to_skip:
                continue
            url_local = x.group('urllocal')
            url_commons = x.group('urlcommons')
            pywikibot.output(
                u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % image_local)
            pywikibot.output(u'Local: %s\nCommons: %s\n'
                             % (url_local, url_commons))
            # Open both images in the browser for visual comparison
            # (fix: dropped unused result1/result2 locals).
            webbrowser.open(url_local, 0, 1)
            webbrowser.open(url_commons, 0, 1)
            if image_local.split('Image:')[1] == image_commons:
                choice = pywikibot.inputChoice(
                    u'The local and the commons images have the same name, continue?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
            else:
                choice = pywikibot.inputChoice(
                    u'Are the two images equal?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
            if choice.lower() in ['y', 'yes']:
                yield [image_local, image_commons]
            else:
                continue
        # The page is dinamically updated, so we may don't need to change it
        if change_page:
            num_page += 1
        # If no image found means that there aren't anymore, break.
        if not found_something:
            break
def treat(self, pageno): """ Loads the given page, does some changes, and saves it. """ site = pywikibot.getSite() page_namespace = site.family.namespaces[104][site.lang] page = pywikibot.Page(site, u'%s:%s/%d' % (page_namespace, self.prefix, pageno)) exists = page.exists() djvutxt = self.get_page(pageno) if not djvutxt: text = u'<noinclude><pagequality level="0" user="******" /><div class="pagetext">\n\n\n</noinclude><noinclude><references/></div></noinclude>' % (self.username) else: text = u'<noinclude><pagequality level="1" user="******" /><div class="pagetext">\n\n\n</noinclude>%s<noinclude><references/></div></noinclude>' % (self.username,djvutxt) # convert to wikisyntax # this adds a second line feed, which makes a new paragraph text = text.replace('', "\n") # US /x1F text = text.replace('', "\n") # GS /x1D text = text.replace('', "\n") # FF /x0C # only save if something was changed # automatically ask if overwriting an existing page ask = self.ask if exists: ask = True old_text = page.get() if old_text == text: pywikibot.output(u"No changes were needed on %s" % page.title(asLink=True)) return else: old_text = '' pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(old_text, text) if self.dry: pywikibot.inputChoice(u'Dry mode... Press enter to continue', [], [], 'dummy') return if ask: choice = pywikibot.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') else: choice = 'y' if choice == 'y': try: # Save the page page.put_async(text) except pywikibot.LockedPage: pywikibot.output(u"Page %s is locked; skipping." % page.title(asLink=True)) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % (page.title())) except pywikibot.SpamfilterError, error: pywikibot.output(u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url))
def process_filename(self): # Isolate the pure name filename = self.url if '/' in filename: filename = filename.split('/')[-1] if '\\' in filename: filename = filename.split('\\')[-1] if self.urlEncoding: filename = urllib.unquote(filename.decode(self.urlEncoding)) if self.useFilename: filename = self.useFilename if not self.keepFilename: wikipedia.output(u"The filename on the target wiki will default to: %s" % filename) # ask newfn until it's valid ok = False # FIXME: these 2 belong somewhere else, presumably in family forbidden = '/' # to be extended allowed_formats = (u'gif', u'jpg', u'jpeg', u'mid', u'midi', u'ogg', u'png', u'svg', u'xcf', u'djvu') while not ok: ok = True newfn = wikipedia.input(u'Enter a better name, or press enter to accept:') if newfn == "": newfn = filename ext = os.path.splitext(newfn)[1].lower().strip('.') for c in forbidden: if c in newfn: print "Invalid character: %s. Please try again" % c ok = False if ext not in allowed_formats and ok: choice = wikipedia.inputChoice(u"File format is not one of [%s], but %s. Continue?" % (u' '.join(allowed_formats), ext), ['yes', 'no'], ['y', 'N'], 'N') if choice == 'n': ok = False if newfn != '': filename = newfn # MediaWiki doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form filename = filename.replace(' ', '_') # A proper description for the submission. wikipedia.output(u"The suggested description is:") wikipedia.output(self.description) if self.verifyDescription: newDescription = u'' choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['y', 'N'], 'n') if choice == 'y': import editarticle editor = editarticle.TextEditor() newDescription = editor.edit(self.description) # if user saved / didn't press Cancel if newDescription: self.description = newDescription return filename
def useHashGenerator(self):
    """Yield [local_image, commons_image] pairs scraped from the toolserver
    duplicate-image report, asking the user to confirm each pair visually.
    """
    # http://toolserver.org/~multichill/nowcommons.php?language=it&page=2&filter=
    lang = self.site.lang
    num_page = 0
    word_to_skip_translated = pywikibot.translate(self.site, word_to_skip)
    images_processed = list()
    while True:  # idiom fix: was "while 1"
        url = ('http://toolserver.org/~multichill/nowcommons.php'
               '?language=%s&page=%s&filter=' % (lang, num_page))
        HTML_text = self.site.getUrl(url, no_hostname=True)
        reg = r'<[Aa] href="(?P<urllocal>.*?)">(?P<imagelocal>.*?)</[Aa]> +?</td><td>\n\s*?'
        reg += r'<[Aa] href="(?P<urlcommons>http://commons.wikimedia.org/.*?)">Image:(?P<imagecommons>.*?)</[Aa]> +?</td><td>'
        regex = re.compile(reg, re.UNICODE)
        found_something = False
        change_page = True
        for x in regex.finditer(HTML_text):
            found_something = True
            image_local = x.group('imagelocal')
            image_commons = x.group('imagecommons')
            if image_local in images_processed:
                continue
            change_page = False
            images_processed.append(image_local)
            # Skip images that have something in the title (useful for it.wiki)
            image_to_skip = False
            for word in word_to_skip_translated:
                if word.lower() in image_local.lower():
                    image_to_skip = True
            if image_to_skip:
                continue
            url_local = x.group('urllocal')
            url_commons = x.group('urlcommons')
            pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                             % image_local)
            pywikibot.output(u'Local: %s\nCommons: %s\n'
                             % (url_local, url_commons))
            # Open both images in the browser for visual comparison
            # (fix: dropped unused result1/result2 locals).
            webbrowser.open(url_local, 0, 1)
            webbrowser.open(url_commons, 0, 1)
            if image_local.split('Image:')[1] == image_commons:
                choice = pywikibot.inputChoice(
                    u'The local and the commons images have the same name, continue?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
            else:
                choice = pywikibot.inputChoice(
                    u'Are the two images equal?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
            if choice.lower() in ['y', 'yes']:
                yield [image_local, image_commons]
            else:
                continue
        # The page is dinamically updated, so we may don't need to change it
        if change_page:
            num_page += 1
        # If no image found means that there aren't anymore, break.
        if not found_something:
            break
def treat(self, pageno): """ Loads the given page, does some changes, and saves it. """ site = pywikibot.getSite() page_namespace = site.family.namespaces[104][site.lang] page = pywikibot.Page(site, u'%s:%s/%d' % (page_namespace, self.prefix, pageno)) exists = page.exists() djvutxt = self.get_page(pageno) if not djvutxt: text = u'<noinclude><pagequality level="0" user="******" /><div class="pagetext">\n\n\n</noinclude><noinclude><references/></div></noinclude>' % (self.username) else: text = u'<noinclude><pagequality level="1" user="******" /><div class="pagetext">\n\n\n</noinclude>%s<noinclude><references/></div></noinclude>' % (self.username,djvutxt) # convert to wikisyntax # this adds a second line feed, which makes a new paragraph text = text.replace('', "\n") # US /x1F text = text.replace('', "\n") # GS /x1D text = text.replace('', "\n") # FF /x0C # only save if something was changed # automatically ask if overwriting an existing page ask = self.ask if exists: ask = True old_text = page.get() if old_text == text: pywikibot.output(u"No changes were needed on %s" % page.aslink()) return else: old_text = '' pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(old_text, text) if self.dry: pywikibot.inputChoice(u'Dry mode... Press enter to continue', [], [], 'dummy') return if ask: choice = pywikibot.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') else: choice = 'y' if choice == 'y': try: # Save the page page.put_async(text) except pywikibot.LockedPage: pywikibot.output(u"Page %s is locked; skipping." % page.aslink()) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % (page.title())) except pywikibot.SpamfilterError, error: pywikibot.output(u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url))
def main():
    """Interactively collect preference key/value pairs and apply them to
    every configured account on every wiki."""
    pywikibot.warning(u'This script will set preferences on all '
                      u'configured accounts!')
    account_total = sum([len(family) for family in config.usernames.itervalues()])
    pywikibot.output(u'You have %s accounts configured.' % account_total)
    if pywikibot.inputChoice(u'Do you wish to continue?',
                             ['no', 'yes'], ['n', 'y'], 'n') == 'n':
        return
    # Optionally show the available preferences as a table first.
    if pywikibot.inputChoice(u'Do you already know which preference you wish '
                             u'to set?', ['no', 'yes'], ['n', 'y'], 'y') == 'n':
        site = pywikibot.getSite()
        pywikibot.output(u'Getting list of available preferences from %s.' % site)
        prefs = Preferences(site)
        pywikibot.output(u'-' * 73)
        pywikibot.output(u'| Name | Value |')
        pywikibot.output(u'-' * 73)
        pref_data = prefs.items()
        pref_data.sort()
        for key, value in pref_data:
            pywikibot.output(table_cell(key, 4) + table_cell(value, 5) + '|')
        pywikibot.output(u'-' * 73)
        pywikibot.output(u'')
        pywikibot.output(u'(For checkboxes: An empty string evaluates to False; '
                         u'all others to True)')
        pywikibot.output(u'')
    while True:
        keys, values = [], []
        # Gather key/value pairs until the user is done (Ctrl-C aborts).
        while True:
            try:
                keys.append(
                    pywikibot.input(u'Which preference do you wish to set?'))
            except KeyboardInterrupt:
                return
            values.append(
                pywikibot.input(u"To what value do you wish to set '%s'?"
                                % keys[-1]))
            if pywikibot.inputChoice(u"Set more preferences?",
                                     ['no', 'yes'], ['n', 'y'], 'n') == 'n':
                break
        summary = u', '.join(u'%s:%s' % (key, value)
                             for key, value in zip(keys, values))
        if pywikibot.inputChoice(u"Set %s?" % summary,
                                 ['yes', 'no'], ['y', 'n'], 'n') == 'y':
            set_all(keys, values, verbose=True)
            pywikibot.output(u"Preferences have been set on all wikis.")
def main():
    """Interactively collect preference key/value pairs and apply them to
    every configured account on every wiki."""
    pywikibot.warning(u"This script will set preferences on all "
                      u"configured accounts!")
    n_accounts = sum([len(family) for family in config.usernames.itervalues()])
    pywikibot.output(u"You have %s accounts configured." % n_accounts)
    if pywikibot.inputChoice(u"Do you wish to continue?",
                             ["no", "yes"], ["n", "y"], "n") == "n":
        return
    # Optionally list all available preferences first.
    if pywikibot.inputChoice(u"Do you already know which preference you wish "
                             u"to set?", ["no", "yes"], ["n", "y"], "y") == "n":
        site = pywikibot.getSite()
        pywikibot.output(u"Getting list of available preferences from %s." % site)
        prefs = Preferences(site)
        pywikibot.output(u"-" * 73)
        pywikibot.output(u"| Name | Value |")
        pywikibot.output(u"-" * 73)
        pref_data = prefs.items()
        pref_data.sort()
        for key, value in pref_data:
            pywikibot.output(table_cell(key, 4) + table_cell(value, 5) + "|")
        pywikibot.output(u"-" * 73)
        pywikibot.output(u"")
        pywikibot.output(u"(For checkboxes: An empty string evaluates to False; "
                         u"all others to True)")
        pywikibot.output(u"")
    while True:
        keys, values = [], []
        # Gather key/value pairs until the user is done (Ctrl-C aborts).
        while True:
            try:
                keys.append(pywikibot.input(u"Which preference do you wish to set?"))
            except KeyboardInterrupt:
                return
            values.append(pywikibot.input(
                u"To what value do you wish to set '%s'?" % keys[-1]))
            if pywikibot.inputChoice(u"Set more preferences?",
                                     ["no", "yes"], ["n", "y"], "n") == "n":
                break
        summary = u", ".join(u"%s:%s" % (key, value)
                             for key, value in zip(keys, values))
        if pywikibot.inputChoice(u"Set %s?" % summary,
                                 ["yes", "no"], ["y", "n"], "n") == "y":
            set_all(keys, values, verbose=True)
            pywikibot.output(u"Preferences have been set on all wikis.")
def main():
    """Interactively collect preference key/value pairs and apply them to
    every configured account on every wiki."""
    pywikibot.warning(u'This script will set preferences on all '
                      u'configured accounts!')
    total = sum([len(family) for family in config.usernames.itervalues()])
    pywikibot.output(u'You have %s accounts configured.' % total)
    if pywikibot.inputChoice(u'Do you wish to continue?',
                             ['no', 'yes'], ['n', 'y'], 'n') == 'n':
        return
    # Show the table of available preferences if the user is unsure.
    if pywikibot.inputChoice(u'Do you already know which preference you wish '
                             u'to set?', ['no', 'yes'], ['n', 'y'], 'y') == 'n':
        site = pywikibot.getSite()
        pywikibot.output(u'Getting list of available preferences from %s.' % site)
        prefs = Preferences(site)
        pywikibot.output(u'-' * 73)
        pywikibot.output(u'| Name | Value |')
        pywikibot.output(u'-' * 73)
        pref_data = prefs.items()
        pref_data.sort()
        for key, value in pref_data:
            pywikibot.output(table_cell(key, 4) + table_cell(value, 5) + '|')
        pywikibot.output(u'-' * 73)
        pywikibot.output(u'')
        pywikibot.output(u'(For checkboxes: An empty string evaluates to False; '
                         u'all others to True)')
        pywikibot.output(u'')
    while True:
        keys, values = [], []
        # Gather key/value pairs until the user is done (Ctrl-C aborts).
        while True:
            try:
                keys.append(pywikibot.input(
                    u'Which preference do you wish to set?'))
            except KeyboardInterrupt:
                return
            values.append(pywikibot.input(
                u"To what value do you wish to set '%s'?" % keys[-1]))
            if pywikibot.inputChoice(u"Set more preferences?",
                                     ['no', 'yes'], ['n', 'y'], 'n') == 'n':
                break
        summary = u', '.join(u'%s:%s' % (key, value)
                             for key, value in zip(keys, values))
        if pywikibot.inputChoice(u"Set %s?" % summary,
                                 ['yes', 'no'], ['y', 'n'], 'n') == 'y':
            set_all(keys, values, verbose=True)
            pywikibot.output(u"Preferences have been set on all wikis.")
def save(self, text, page, comment, cleanInfobox, minorEdit=False, botflag=False):
    """Show a diff, log it, and (after confirmation) save the new text.

    NOTE(review): reconstructed from a collapsed one-line source; the binding
    of the final 'else:' branch (notepad/chrome fallback) is assumed to pair
    with the outer "changed and editable" check -- confirm against upstream.
    """
    # only save if something was changed
    if text != page.get() and self.canEditPage:
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        # show what was changed
        pywikibot.showDiff(page.get(), text)
        # Record the edit summary, title, diff and cleaned infobox in the log.
        self.log.write(comment+"\n")
        self.log.write("======" + page.title() + "======\n")
        self.log.write(self.logDiff(page.get(), text))
        self.log.write("\n\n")
        self.log.write(cleanInfobox)
        self.log.write("\n\n")
        if not self.html:
            self.log.close()
        if self.html:
            # HTML mode: emit an edit link for later manual review.
            self.filmLogLinks.write('<a href="https://secure.wikimedia.org/wikipedia/en/wiki/'+page.title().replace(" ", "_")+'?action=edit">'+page.title()+'</a><br />'+"\n")
        elif self.dry:
            # Dry mode: open the diff and the edit page for manual inspection.
            spNotepad = subprocess.Popen('notepad filmLog.diff')
            spChrome = subprocess.Popen(self.chrome+' '+"https://secure.wikimedia.org/wikipedia/en/wiki/"+page.title().replace(" ", "_").encode('utf-8', 'replace')+"?action=edit")
            choice = pywikibot.inputChoice("This is a wait",
                                           ['Yes', 'No'], ['y', 'N'], 'N')
        pywikibot.output(u'Comment: %s' %comment)
        if not self.dry:
            choice = pywikibot.inputChoice(
                u'Do you want to accept these changes?',
                ['Yes', 'No'], ['y', 'N'], 'N')
            if choice == 'y':
                try:
                    # Save the page
                    page.put(text, comment=comment, minorEdit=minorEdit,
                             botflag=botflag)
                except pywikibot.LockedPage:
                    pywikibot.output(u"Page %s is locked; skipping."
                                     % page.title(asLink=True))
                except pywikibot.EditConflict:
                    pywikibot.output(
                        u'Skipping %s because of edit conflict'
                        % (page.title()))
                except pywikibot.SpamfilterError, error:
                    pywikibot.output(
                        u'Cannot change %s because of spam blacklist entry %s'
                        % (page.title(), error.url))
                else:
                    return True
    else:
        # Nothing to save (or page not editable): open the diff and the edit
        # page so the operator can inspect the situation manually.
        spNotepad = subprocess.Popen('notepad filmLog.diff')
        spChrome = subprocess.Popen(self.chrome+' '+"https://secure.wikimedia.org/wikipedia/en/wiki/"+page.title().replace(" ", "_").encode('utf-8', 'replace')+"?action=edit")
        choice = pywikibot.inputChoice("This is a wait",
                                       ['Yes', 'No'], ['y', 'N'], 'N')
def PickTarget(self, title, original, candidates):
    """Pick the best replacement title among candidates; ask the user when
    no automatic choice is possible. Returns a title or None to skip."""
    if len(candidates) == 0:
        return None
    if len(candidates) == 1:
        return candidates[0]
    # Classify every candidate: missing, redirect, or existing page.
    pagesDontExist = []
    pagesRedir = {}
    pagesExist = []
    for cand in candidates:
        dst = self.Page(cand)
        if not dst.exists():
            pagesDontExist.append(cand)
        elif dst.isRedirectPage():
            pagesRedir[cand] = dst.getRedirectTarget().title()
        else:
            pagesExist.append(cand)
    if len(pagesExist) == 1:
        # Exactly one real page: obvious winner.
        return pagesExist[0]
    elif len(pagesExist) == 0 and len(pagesRedir) > 0:
        if len(pagesRedir) == 1:
            return pagesRedir.keys()[0]
        t = None
        for k, v in pagesRedir.iteritems():
            if not t:
                t = v  # first item
            elif t != v:
                break
        else:
            # all redirects point to the same target
            # pick the first one, doesn't matter what it is
            return pagesRedir.keys()[0]
    if not self.autonomous:
        pywikibot.output(
            u"Could not auto-decide for page %s. Which link should be chosen?"
            % self.MakeLink(title, False))
        pywikibot.output(u"Original title: ", newline=False)
        self.ColorCodeWord(original + "\n", True)
        count = 1
        for t in candidates:
            if t in pagesDontExist:
                msg = u"missing"
            elif t in pagesRedir:
                msg = u"Redirect to " + pagesRedir[t]
            else:
                msg = u"page exists"
            self.ColorCodeWord(u" %d: %s (%s)\n" % (count, t, msg), True)
            count += 1
        answers = [str(i) for i in xrange(0, count)]
        choice = int(pywikibot.inputChoice(u"Which link to choose? (0 to skip)",
                                           answers,
                                           [a[0] for a in answers]))
        if choice > 0:
            return candidates[choice - 1]
    return None
def handleNextLink(self, text, match, context=100):
    """
    Returns a tuple (text, jumpToBeginning).

    text is the unicode string after the current link has been processed.
    jumpToBeginning is a boolean which specifies if the cursor position
    should be reset to 0. This is required after the user has edited the
    article.
    """
    # ignore interwiki links and links to sections of the same page as well
    # as section links
    if not match.group('title') \
       or self.pageToUnlink.site().isInterwikiLink(match.group('title')) \
       or match.group('section'):
        return text, False
    linkedPage = pywikibot.Page(self.pageToUnlink.site(), match.group('title'))
    # Check whether the link found is to the current page itself.
    if linkedPage != self.pageToUnlink:
        # not a self-link
        return text, False
    # Ask what to do, unless "unlink all" was chosen earlier.
    if self.always:
        choice = 'a'
    else:
        # Show the link highlighted in red inside its surrounding context.
        before = text[max(0, match.start() - context):match.start()]
        inside = text[match.start():match.end()]
        after = text[match.end():match.end() + context]
        pywikibot.output(before + '\03{lightred}' + inside + '\03{default}' + after)
        choice = pywikibot.inputChoice(
            u'\nWhat shall be done with this link?\n',
            ['unlink', 'skip', 'edit', 'more context', 'unlink all', 'quit'],
            ['U', 's', 'e', 'm', 'a', 'q'], 'u')
        pywikibot.output(u'')
    if choice == 's':
        # skip this link
        return text, False
    elif choice == 'e':
        editor = editarticle.TextEditor()
        newText = editor.edit(text, jumpIndex=match.start())
        # if user didn't press Cancel
        if newText:
            return newText, True
        return text, True
    elif choice == 'm':
        # show more context by recursive self-call
        return self.handleNextLink(text, match, context=context + 100)
    elif choice == 'a':
        self.always = True
    elif choice == 'q':
        self.done = True
        return text, False
    # Replace the link with its label (or title) plus the link trail.
    replacement = match.group('label') or match.group('title')
    replacement += match.group('linktrail')
    return text[:match.start()] + replacement + text[match.end():], False
def run(self):
    """Walk the generator and create a capitalized redirect for every page
    whose capitalized title does not exist yet."""
    for page in self.generator:
        if page.isRedirectPage():
            page = page.getRedirectTarget()
        page_t = page.title()
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        wikipedia.output(u"\n>>> \03{lightpurple}%s\03{default} <<<" % page_t)
        # NOTE(review): page_t is a string; .title().capitalize() title-cases
        # then capitalizes it -- confirm that this is the intended form.
        page_cap = wikipedia.Page(wikipedia.getSite(),
                                  page_t.title().capitalize())
        if not page_cap.exists():
            wikipedia.output(u'%s doesn\'t exist' % page_cap.title())
            if not self.acceptall:
                choice = wikipedia.inputChoice(
                    u'Do you want to create a redirect?',
                    ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
                if choice == 'a':
                    self.acceptall = True
            if self.acceptall or choice == 'y':
                try:
                    wikipedia.setAction(
                        wikipedia.translate(wikipedia.getSite(), msg) % page_t)
                    page_cap.put(u"#REDIRECT [[%s]]" % page_t)
                    print
                except:
                    # FIXME: bare except; any failure triggers a retry pause.
                    wikipedia.output(
                        u"An error occurred. Retrying in 15 seconds...")
                    time.sleep(15)
                    continue
        else:
            wikipedia.output(u'%s already exists, skipping...\n'
                             % page_t.title())
def save(self, page, newText): """ Saves the page to the wiki, if the user accepts the changes made. """ pywikibot.showDiff(page.get(), newText) if not self.always: choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No', 'Always yes'], ['y', 'N', 'a'], 'Y') if choice == 'n': return elif choice == 'a': self.always = True if self.always: try: page.put(newText) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % (page.title(),)) except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % (page.title(),))
def treat(self, page):
    """Apply cosmetic changes to one page and save after confirmation."""
    try:
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        ccToolkit = CosmeticChangesToolkit(page.site(), debug=True,
                                           namespace=page.namespace(),
                                           pageTitle=page.title())
        changedText = ccToolkit.change(page.get())
        if changedText.strip() == page.get().strip():
            # Nothing but whitespace differs: leave the page alone.
            pywikibot.output("No changes were necessary in %s" % page.title())
            return
        if not self.acceptall:
            choice = pywikibot.inputChoice(
                u"Do you want to accept these changes?",
                ["Yes", "No", "All", "Quit"], ["y", "N", "a", "q"], "N")
            if choice == "a":
                self.acceptall = True
            elif choice == "q":
                self.done = True
                return
        if self.acceptall or choice == "y":
            page.put(changedText, comment=self.comment)
    except pywikibot.NoPage:
        pywikibot.output("Page %s does not exist?!" % page.title(asLink=True))
    except pywikibot.IsRedirectPage:
        pywikibot.output("Page %s is a redirect; skipping."
                         % page.title(asLink=True))
    except pywikibot.LockedPage:
        pywikibot.output("Page %s is locked?!" % page.title(asLink=True))
    except pywikibot.EditConflict:
        pywikibot.output("An edit conflict has occured at %s."
                         % page.title(asLink=True))
def save(self, page, text): if text != page.get(): # Show the title of the page we're working on. # Highlight the title in purple. pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(page.get(), text) if not self.always: choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No', 'Always yes'], ['y', 'N', 'a'], 'N') if choice == 'n': return elif choice == 'a': self.always = True if self.always: try: page.put(text, comment=self.comment) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % (page.title(), )) except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % (page.title(), ))
def main():
    """Parse refcheck command-line arguments and run the reference counter."""
    doCount = False
    argsList = []
    namespaces = []
    for arg in wikipedia.handleArgs():
        if arg == '-count':
            doCount = True
        elif arg.startswith('-namespace:'):
            # Numeric namespace ids are kept as ints, names as strings.
            try:
                namespaces.append(int(arg[len('-namespace:'):]))
            except ValueError:
                namespaces.append(arg[len('-namespace:'):])
        else:
            argsList.append(arg)
    if doCount:
        robot = ReferencesRobot()
        if not argsList:
            argsList = templates
        choice = ''
        if 'reflist' in argsList:
            wikipedia.output(u'NOTE: it will take a long time to count "reflist".')
            choice = wikipedia.inputChoice(u'Proceed anyway?',
                                           ['yes', 'no', 'skip'],
                                           ['y', 'n', 's'], 'y')
            if choice == 's':
                argsList.remove('reflist')
        # Fixed idiom: replaced the deprecated '<>' operator with '!='.
        if choice != 'n':
            robot.countRefs(argsList, namespaces)
    else:
        wikipedia.showHelp('refcheck')
def handleNextLink(self, text, match, context=100):
    """
    Returns a tuple (text, jumpToBeginning).

    text is the unicode string after the current link has been processed.
    jumpToBeginning is a boolean which specifies if the cursor position
    should be reset to 0. This is required after the user has edited the
    article.
    """
    # ignore interwiki links and links to sections of the same page as well
    # as section links
    if not match.group('title') \
       or self.pageToUnlink.site().isInterwikiLink(match.group('title')) \
       or match.group('section'):
        return text, False
    linkedPage = pywikibot.Page(self.pageToUnlink.site(), match.group('title'))
    # Check whether the link found is to the current page itself.
    if linkedPage != self.pageToUnlink:
        # not a self-link
        return text, False
    # Ask what to do, unless "unlink all" was chosen earlier.
    if self.always:
        choice = 'a'
    else:
        # Show the link highlighted in red inside its surrounding context.
        before = text[max(0, match.start() - context):match.start()]
        inside = text[match.start():match.end()]
        after = text[match.end():match.end() + context]
        pywikibot.output(before + '\03{lightred}' + inside + '\03{default}' + after)
        choice = pywikibot.inputChoice(
            u'\nWhat shall be done with this link?\n',
            ['unlink', 'skip', 'edit', 'more context', 'unlink all', 'quit'],
            ['U', 's', 'e', 'm', 'a', 'q'], 'u')
        pywikibot.output(u'')
    if choice == 's':
        # skip this link
        return text, False
    elif choice == 'e':
        editor = editarticle.TextEditor()
        newText = editor.edit(text, jumpIndex=match.start())
        # if user didn't press Cancel
        if newText:
            return newText, True
        return text, True
    elif choice == 'm':
        # show more context by recursive self-call
        return self.handleNextLink(text, match, context=context + 100)
    elif choice == 'a':
        self.always = True
    elif choice == 'q':
        self.done = True
        return text, False
    # Replace the link with its label (or title) plus the link trail.
    replacement = match.group('label') or match.group('title')
    replacement += match.group('linktrail')
    return text[:match.start()] + replacement + text[match.end():], False
def treat(self, page):
    """Apply cosmetic changes to one page and save after confirmation."""
    try:
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        ccToolkit = CosmeticChangesToolkit(page.site(), debug=True,
                                           namespace=page.namespace(),
                                           pageTitle=page.title())
        changedText = ccToolkit.change(page.get())
        if changedText.strip() == page.get().strip():
            # Nothing but whitespace differs: leave the page alone.
            pywikibot.output('No changes were necessary in %s' % page.title())
            return
        if not self.acceptall:
            choice = pywikibot.inputChoice(
                u'Do you want to accept these changes?',
                ['Yes', 'No', 'All', 'Quit'], ['y', 'N', 'a', 'q'], 'N')
            if choice == 'a':
                self.acceptall = True
            elif choice == 'q':
                self.done = True
                return
        if self.acceptall or choice == 'y':
            page.put(changedText, comment=self.comment)
    except pywikibot.NoPage:
        pywikibot.output("Page %s does not exist?!" % page.aslink())
    except pywikibot.IsRedirectPage:
        pywikibot.output("Page %s is a redirect; skipping." % page.aslink())
    except pywikibot.LockedPage:
        pywikibot.output("Page %s is locked?!" % page.aslink())
    except pywikibot.EditConflict:
        pywikibot.output("An edit conflict has occured at %s." % page.aslink())
def treat(self, page):
    """Create a (title-cased or capitalized) redirect to `page` if missing."""
    if page.isRedirectPage():
        page = page.getRedirectTarget()
    page_t = page.title()
    # Show the title of the page we're working on.
    # Highlight the title in purple.
    pywikibot.output(u"\n>>> \03{lightpurple}%s\03{default} <<<" % page_t)
    # Pick the variant title: full title-case or first-letter capitalization.
    if self.titlecase:
        page_cap = pywikibot.Page(self.site, page_t.title())
    else:
        page_cap = pywikibot.Page(self.site, page_t.capitalize())
    if page_cap.exists():
        pywikibot.output(u'%s already exists, skipping...\n'
                         % page_cap.title(asLink=True))
        return
    pywikibot.output(u'[[%s]] doesn\'t exist' % page_cap.title())
    if not self.acceptall:
        choice = pywikibot.inputChoice(
            u'Do you want to create a redirect?',
            ['Yes', 'No', 'All', 'Quit'], ['y', 'N', 'a', 'q'], 'N')
        if choice == 'a':
            self.acceptall = True
        elif choice == 'q':
            self.done = True
    if self.acceptall or choice == 'y':
        comment = pywikibot.translate(self.site, msg) % page_t
        try:
            page_cap.put(u"#%s [[%s]]" % (self.site.redirect(True), page_t),
                         comment)
        except:
            # FIXME: bare except; swallows every failure.
            pywikibot.output(u"An error occurred, skipping...")
def put_page(self, page, new): """ Prints diffs between orginal and new (text), puts new text for page """ pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(page.get(), new) if not self.acceptall: choice = pywikibot.inputChoice(u'Do you want to accept ' + u'these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N') if choice == 'a': self.acceptall = True if choice == 'y': page.put_async(new) if self.acceptall: try: page.put(new) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % (page.title(),)) except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) except pywikibot.PageNotSaved, error: pywikibot.output(u'Error putting page: %s' % (error.args,))
def appendtext(page, apptext):
    """Append `apptext` to `page`, prompting unless the global `always` is set."""
    global always
    if page.isRedirectPage():
        page = page.getRedirectTarget()
    if page.exists():
        text = page.get()
    elif page.isTalkPage():
        # Missing talk pages may simply be created.
        text = u''
    else:
        raise pywikibot.NoPage(u"Page '%s' does not exist" % page.title())
    # Here you can go editing. If you find you do not
    # want to edit this page, just return
    oldtext = text
    text += apptext
    if text != oldtext:
        pywikibot.showDiff(oldtext, text)
        if not always:
            choice = pywikibot.inputChoice(
                u'Do you want to accept these changes?',
                ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
            if choice == 'a':
                always = True
        if always or choice == 'y':
            page.put(text, pywikibot.translate(pywikibot.getSite(), comment))
def save(self, page, text): if text != page.get(): # Show the title of the page we're working on. # Highlight the title in purple. pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(page.get(), text) if not self.always: choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No', 'Always yes'], ['y', 'N', 'a'], 'N') if choice == 'n': return elif choice == 'a': self.always = True if self.always: try: page.put(text, comment=self.comment) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % (page.title(),)) except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % (page.title(),))
def put_page(self, page, new): """ Prints diffs between orginal and new (text), puts new text for page """ pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(page.get(), new) if not self.acceptall: choice = pywikibot.inputChoice( u'Do you want to accept ' + u'these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N') if choice == 'a': self.acceptall = True if choice == 'y': page.put_async(new, self.msg) if self.acceptall: try: page.put(new, self.msg) except pywikibot.EditConflict: pywikibot.output(u'Skipping %s because of edit conflict' % page.title()) except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) except pywikibot.PageNotSaved, error: pywikibot.error(u'putting page: %s' % error.args)
def main():
    """Parse the command line and run the cosmetic changes bot."""
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()
    titleParts = []
    for arg in wikipedia.handleArgs():
        if not genFactory.handleArg(arg):
            titleParts.append(arg)
    gen = None
    if titleParts:
        # A single page title was given on the command line.
        single = wikipedia.Page(wikipedia.getSite(), ' '.join(titleParts))
        gen = iter([single])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        wikipedia.showHelp()
        return
    answer = wikipedia.inputChoice(
        warning + '\nDo you really want to continue?',
        ['yes', 'no'], ['y', 'N'], 'N')
    if answer == 'y':
        # Preload pages in batches to cut down on server round trips.
        bot = CosmeticChangesBot(pagegenerators.PreloadingGenerator(gen))
        bot.run()
def save(self, text, page, comment=None, **kwargs): # only save if something was changed if text != page.get(): # Show the title of the page we're working on. # Highlight the title in purple. pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) # show what was changed pywikibot.showDiff(page.get(), text) pywikibot.output(u'Comment: %s' % comment) choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') if choice == 'y': try: # Save the page page.put(text, comment=comment or self.comment, **kwargs) except pywikibot.LockedPage: pywikibot.output(u"Page %s is locked; skipping." % page.title(asLink=True)) except pywikibot.EditConflict: pywikibot.output( u'Skipping %s because of edit conflict' % (page.title())) except pywikibot.SpamfilterError, error: pywikibot.output( u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url)) else: return True
def save(self, text, page, comment, minorEdit=False, botflag=False): # only save if something was changed if text != page.get(): # Show the title of the page we're working on. # Highlight the title in purple. pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) # show what was changed pywikibot.showDiff(page.get(), text) pywikibot.output(u'Comment: %s' %comment) choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') if choice == 'y': try: # Save the page page.put(text, comment=comment, minorEdit=minorEdit, botflag=botflag) except pywikibot.LockedPage: pywikibot.output(u"Page %s is locked; skipping." % page.title(asLink=True)) except pywikibot.EditConflict: pywikibot.output( u'Skipping %s because of edit conflict' % (page.title())) except pywikibot.SpamfilterError, error: pywikibot.output( u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url)) else: return True
def reportBadAccount(self, name=None, final=False):
    """Queue a suspicious username and flush the queue to the report page.

    With ``name`` given, the user is (optionally after operator
    confirmation) appended to the internal queue.  When the queue holds
    at least globalvar.dumpToLog names, or ``final`` is true, all queued
    names are written to the per-wiki report page.
    """
    # Queue process
    if name:
        if globalvar.confirm:
            answer = pywikibot.inputChoice(
                u'%s may have an unwanted username, do you want to report this user?'
                % name, ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
            if answer in ['a', 'all']:
                # "All" implies yes and disables further confirmation.
                answer = 'y'
                globalvar.confirm = False
        else:
            answer = 'y'
        if answer.lower() in ['yes', 'y'] or not globalvar.confirm:
            showStatus()
            pywikibot.output(
                u'%s is possibly an unwanted username. It will be reported.'
                % name)
            # Lazily create the queue on first use.
            if hasattr(self, '_BAQueue'):
                self._BAQueue.append(name)
            else:
                self._BAQueue = [name]
    if len(self._BAQueue) >= globalvar.dumpToLog or final:
        rep_text = ''
        # name in queue is max, put detail to report page
        pywikibot.output("Updating badname accounts to report page...")
        rep_page = pywikibot.Page(
            self.site,
            pywikibot.translate(self.site, report_page))
        if rep_page.exists():
            text_get = rep_page.get()
        else:
            text_get = u'This is a report page for the Bad-username, please translate me. --~~~'
        pos = 0
        # The talk page includes "_" between the two names, in this way i
        # replace them to " ".
        for usrna in self._BAQueue:
            username = pywikibot.url2link(usrna, self.site, self.site)
            n = re.compile(re.escape(username), re.UNICODE)
            y = n.search(text_get, pos)
            if y:
                # Skip names that are already listed on the report page.
                pywikibot.output(u'%s is already in the report page.'
                                 % username)
            else:
                # Adding the log.
                rep_text += pywikibot.translate(self.site,
                                                report_text) % username
                if self.site.lang == 'it':
                    # The Italian report template needs a closing "}}".
                    rep_text = "%s%s}}" % (rep_text, self.bname[username])
        com = i18n.twtranslate(self.site, 'welcome-bad_username')
        if rep_text != '':
            rep_page.put(text_get + rep_text, comment=com, force=True,
                         minorEdit=True)
            showStatus(5)
            pywikibot.output(u'Reported')
        # NOTE(review): this assigns self.BAQueue (no underscore) while the
        # queue used above is self._BAQueue, so the queue is apparently
        # never cleared -- confirm the intended attribute name.
        self.BAQueue = list()
    else:
        return True
def main():
    """Parse command line arguments and run the reference-counting robot."""
    doCount = False
    argsList = []
    namespaces = []
    for arg in wikipedia.handleArgs():
        if arg == '-count':
            doCount = True
        elif arg.startswith('-namespace:'):
            try:
                namespaces.append(int(arg[len('-namespace:'):]))
            except ValueError:
                # Non-numeric namespace arguments are kept as names.
                namespaces.append(arg[len('-namespace:'):])
        else:
            argsList.append(arg)
    if doCount:
        robot = ReferencesRobot()
        if not argsList:
            argsList = templates
        choice = ''
        if 'reflist' in argsList:
            wikipedia.output(
                u'NOTE: it will take a long time to count "reflist".')
            choice = wikipedia.inputChoice(
                u'Proceed anyway?',
                ['yes', 'no', 'skip'], ['y', 'n', 's'], 'y')
            if choice == 's':
                argsList.remove('reflist')
        # BUG FIX: used the removed/deprecated "<>" operator; "!=" is the
        # supported spelling.
        if choice != 'n':
            robot.countRefs(argsList, namespaces)
    else:
        wikipedia.showHelp('refcheck')
def main(give_url, image_url, desc):
    """Interactively harvest images from a URL (or a numbered URL range)
    and upload the selected ones with a user-supplied description.

    give_url: starting URL; prompted for when empty.
    image_url: truthy when the URL is a numeric "$"-template range rather
        than a page to scrape for image links.
    desc: base description appended to each image; prompted for when empty.
    """
    url = give_url
    if url == '':
        if image_url:
            url = pywikibot.input(
                u"What URL range should I check (use $ for the part that is changeable)")
        else:
            url = pywikibot.input(u"From what URL should I get the images?")
    if image_url:
        # Numeric range defaults; the user may override either bound.
        minimum = 1
        maximum = 99
        answer = pywikibot.input(
            u"What is the first number to check (default: 1)")
        if answer:
            minimum = int(answer)
        answer = pywikibot.input(
            u"What is the last number to check (default: 99)")
        if answer:
            maximum = int(answer)
    if not desc:
        basicdesc = pywikibot.input(
            u"What text should be added at the end of the description of each image from this url?")
    else:
        basicdesc = desc
    if image_url:
        # Expand the "$" placeholder once per number in [minimum, maximum].
        ilinks = []
        i = minimum
        while i <= maximum:
            ilinks += [url.replace("$", str(i))]
            i += 1
    else:
        ilinks = get_imagelinks(url)
    for image in ilinks:
        answer = pywikibot.inputChoice(u'Include image %s?' % image,
                                       ['yes', 'no', 'stop'],
                                       ['y', 'N', 's'], 'N')
        if answer == 'y':
            desc = pywikibot.input(u"Give the description of this image:")
            categories = []
            while True:
                cat = pywikibot.input(
                    u"Specify a category (or press enter to end adding categories)")
                if not cat.strip():
                    break
                if ":" in cat:
                    # Already namespace-qualified.
                    categories.append("[[" + cat + "]]")
                else:
                    # Namespace 14 is the category namespace.
                    # NOTE(review): "mysite" is a module-level global defined
                    # elsewhere in this script -- confirm it is set before
                    # main() runs.
                    categories.append("[[" + mysite.namespace(14) + ":" +
                                      cat + "]]")
            desc = desc + "\r\n\r\n" + basicdesc + "\r\n\r\n" + \
                   "\r\n".join(categories)
            uploadBot = upload.UploadRobot(image, description=desc)
            uploadBot.run()
        elif answer == 's':
            break
def PickTarget(self, title, original, candidates):
    """Choose which candidate title a link should point to.

    Auto-decides when exactly one candidate exists, or when all existing
    candidates are redirects to the same target; otherwise (unless in
    autonomous mode) asks the user.  Returns the chosen title or None to
    skip.
    """
    if len(candidates) == 0:
        return None
    if len(candidates) == 1:
        return candidates[0]
    # Partition the candidates by what the wiki knows about them.
    pagesDontExist = []
    pagesRedir = {}
    pagesExist = []
    for newTitle in candidates:
        dst = self.Page(newTitle)
        if not dst.exists():
            pagesDontExist.append(newTitle)
        elif dst.isRedirectPage():
            pagesRedir[newTitle] = dst.getRedirectTarget().title()
        else:
            pagesExist.append(newTitle)
    if len(pagesExist) == 1:
        # Exactly one real page: that is the obvious target.
        return pagesExist[0]
    elif len(pagesExist) == 0 and len(pagesRedir) > 0:
        if len(pagesRedir) == 1:
            return pagesRedir.keys()[0]
        t = None
        # for/else: falls through to "else" only if the loop never breaks,
        # i.e. every redirect points to the same target.
        for k, v in pagesRedir.iteritems():
            if not t:
                t = v  # first item
            elif t != v:
                break
        else:
            # all redirects point to the same target
            # pick the first one, doesn't matter what it is
            return pagesRedir.keys()[0]
    if not self.autonomous:
        pywikibot.output(
            u'Could not auto-decide for page %s. Which link should be chosen?'
            % self.MakeLink(title, False))
        pywikibot.output(u'Original title: ', newline=False)
        self.ColorCodeWord(original + "\n", True)
        count = 1
        for t in candidates:
            if t in pagesDontExist:
                msg = u'missing'
            elif t in pagesRedir:
                msg = u'Redirect to ' + pagesRedir[t]
            else:
                msg = u'page exists'
            self.ColorCodeWord(u' %d: %s (%s)\n' % (count, t, msg), True)
            count += 1
        # Answers run 0..len(candidates); 0 means "skip this page".
        answers = [str(i) for i in xrange(0, count)]
        choice = int(
            pywikibot.inputChoice(u'Which link to choose? (0 to skip)',
                                  answers,
                                  [a[0] for a in answers]))
        if choice > 0:
            return candidates[choice - 1]
    return None
def Import(self, target, project='w', crono='1', namespace='',
           prompt=True):
    """Import the page from the wiki. Requires administrator status.

    If prompt is True, asks the user if he wants to import the page.
    Returns True on success, False on failure, None when not attempted.
    """
    if project == 'w':
        site = pywikibot.getSite(fam='wikipedia')
    elif project == 'b':
        site = pywikibot.getSite(fam='wikibooks')
    elif project == 'wikt':
        site = pywikibot.getSite(fam='wiktionary')
    elif project == 's':
        site = pywikibot.getSite(fam='wikisource')
    elif project == 'q':
        site = pywikibot.getSite(fam='wikiquote')
    else:
        site = pywikibot.getSite()
    # Fixing the crono value...
    if crono:
        crono = '1'
    else:
        crono = '0'
    # Fixing namespace's value.
    if namespace == '0':
        # BUG FIX: this line was the no-op comparison "namespace == ''";
        # the intent is to normalize '0' to the empty string.
        namespace = ''
    answer = 'y'
    if prompt:
        answer = pywikibot.inputChoice(u'Do you want to import %s?' % target,
                                       ['Yes', 'No'], ['y', 'N'], 'N')
    if answer == 'y':
        host = self.site().hostname()
        address = self.site().path() + '?title=%s&action=submit' % self.urlname()
        # You need to be a sysop for the import.
        self.site().forceLogin(sysop=True)
        # Getting the token.
        token = self.site().getToken(self, sysop=True)
        # Defining the predata.
        predata = {
            'action': 'submit',
            'source': 'interwiki',
            # from what project do you want to import the page?
            'interwiki': project,
            # What is the page that you want to import?
            'frompage': target,
            # The entire history... or not?
            'interwikiHistory': crono,
            # NOTE(review): the namespace parameter is not forwarded here;
            # the request always asks for the main namespace -- confirm.
            'namespace': '',
        }
        response, data = self.site().postForm(address, predata, sysop=True)
        if data:
            pywikibot.output(u'Page imported, checking...')
            if pywikibot.Page(self.importsite, target).exists():
                pywikibot.output(u'Import success!')
                return True
            else:
                pywikibot.output(u'Import failed!')
                return False
def choiceProtectionLevel(operation, default):
    """Ask the user for a protection level and return its full name.

    ``operation`` is interpolated into the prompt; ``default`` supplies
    the level whose first character is the default answer.
    """
    default = default[0]
    # One-letter shortcut for every known protection level.
    firstChar = [level[0] for level in protectionLevels]
    choiceChar = wikipedia.inputChoice(
        'Choice a protection level to %s:' % operation,
        protectionLevels, firstChar, default=default)
    # Map the single character back to the full level name.
    for level in protectionLevels:
        if level.startswith(choiceChar):
            return level
def Import(self, target, project='w', crono='1', namespace='',
           prompt=True):
    """Import the page from the wiki. Requires administrator status.

    If prompt is True, asks the user if he wants to import the page.
    Returns True on success, False on failure, None when not attempted.
    """
    # Fixing the crono value...
    if crono == True:
        crono = '1'
    elif crono == False:
        crono = '0'
    elif crono == '0':
        pass
    elif crono == '1':
        pass
    else:
        wikipedia.output(u'Crono value set wrongly.')
        wikipedia.stopme()
    # Fixing namespace's value.
    if namespace == '0':
        # BUG FIX: this line was the no-op comparison "namespace == ''";
        # the intent is to normalize '0' to the empty string.
        namespace = ''
    answer = 'y'
    if prompt:
        answer = wikipedia.inputChoice(u'Do you want to import %s?' % target,
                                       ['Yes', 'No'], ['y', 'N'], 'N')
    if answer in ['y', 'Y']:
        host = self.site().hostname()
        address = '/w/index.php?title=%s&action=submit' % self.urlname()
        # You need to be a sysop for the import.
        self.site().forceLogin(sysop=True)
        # Getting the token.
        token = self.site().getToken(self, sysop=True)
        # Defining the predata.
        predata = {
            'action': 'submit',
            'source': 'interwiki',
            # from what project do you want to import the page?
            'interwiki': project,
            # What is the page that you want to import?
            'frompage': target,
            # The entire history... or not?
            'interwikiHistory': crono,
            # What namespace do you want?
            'namespace': '',
        }
        if self.site().hostname() in config.authenticate.keys():
            # Authenticated hosts are posted to directly via urllib2.
            predata['Content-type'] = 'application/x-www-form-urlencoded'
            predata['User-agent'] = useragent
            data = self.site().urlEncode(predata)
            response = urllib2.urlopen(
                urllib2.Request('http://' + self.site().hostname() + address,
                                data))
            data = u''
        else:
            response, data = self.site().postForm(address, predata,
                                                  sysop=True)
        if data:
            wikipedia.output(u'Page imported, checking...')
            # NOTE(review): "site" is not defined anywhere in this function,
            # so this line raises NameError when reached -- confirm which
            # site the check should run against.
            if wikipedia.Page(site, target).exists():
                wikipedia.output(u'Import success!')
                return True
            else:
                wikipedia.output(u'Import failed!')
                return False
def main():
    """Fetch the hourly world-weather overlays and update the it.wikinews
    weather templates (Celsius and Fahrenheit variants), localising the
    English labels to Italian before saving."""
    # Target template -> strftime pattern producing the overlay URL.
    pagesDict = {
        'Template:Weather_World_C/Auto': "http://tools.wikimedia.de/~skenmy/wnweather/overlays/%d%b%y-%H-C-world",
        'Template:Weather_World_F/Auto': "http://tools.wikimedia.de/~skenmy/wnweather/overlays/%d%b%y-%H-F-world",
    }
    args = wikipedia.handleArgs()
    all = False
    for currentArgument in args:
        if currentArgument.startswith("-always"):
            all = True
    for pageName in pagesDict:
        formatUrl = pagesDict[pageName]
        now = datetime.datetime.utcnow()
        urlo = now.strftime(formatUrl)
        wikipedia.output(u'Prendo la pagina dal server...')
        try:
            htmlText = pageText(urlo)
        except urllib2.HTTPError:
            # Retry once after 10 seconds before giving up.
            try:
                wikipedia.output(
                    u"Errore del server. Aspetto 10 secondi... " +
                    time.strftime("%d %b %Y %H:%M:%S (UTC)", time.gmtime()))
                time.sleep(10)
                htmlText = pageText(urlo)
            except urllib2.HTTPError:
                wikipedia.output(u"Errore del server. Chiudo.")
                return
        # Localise the English labels in the fetched overlay to Italian.
        htmlText = re.sub("Wikinews Weather Service",
                          "Servizio Meteo di Wikinotizie", htmlText)
        htmlText = re.sub("World Map", r"Mondo", htmlText)
        # Replace the fixed UTC timestamp with local-time magic words.
        htmlText = re.sub(
            "''\d{2}:\d{2} UTC .*? .*? .*?''</span>",
            "''{{subst:LOCALDAY}} {{subst:LOCALMONTHNAME}} {{subst:LOCALYEAR}}, {{subst:LOCALHOUR}}:00 [[w:CET|<span style=\"color:white; text-decoration:underline;\">CET</span>]] ({{subst:CURRENTHOUR}}:00 [[w:UTC|<span style=\"color:white; text-decoration:underline;\">UTC</span>]])''</span>",
            htmlText)
        page = wikipedia.Page(wikipedia.getSite(code='it', fam='wikinews'),
                              pageName)
        if page.exists():
            oldtext = page.get()
        else:
            oldtext = ""
        wikipedia.showDiff(oldtext, htmlText)
        if not all:
            choice = wikipedia.inputChoice(u"Modificare?",
                                           ['Yes', 'No', 'All'],
                                           ['y', 'N', 'a'], 'N')
        else:
            choice = 'y'
        if choice in ['A', 'a']:
            # "All": stop asking for the remaining templates.
            all = True
            choice = 'y'
        if choice in ['Y', 'y']:
            page.put(htmlText, u"Bot: Aggiorno il meteo")
def treat(self, page):
    """
    Loads the given page, does some changes, and saves it.

    Looks for the remarks field (備考) of the {{施設}} (facility)
    infobox, appends self.im_comment to it (or inserts the field when
    missing) and saves after optional confirmation.
    """
    text = self.load(page)
    if not text:
        return
    # Capture the value of the 備考 ("remarks") template parameter.
    pattern = re.compile( ur'\|\s*備考\s*=([^\n]*\n)' )
    match = pattern.search( text )
    #print match.group(1)
    if match:
        comment = self.im_comment
        if len( match.group(1).strip() ) > 0:
            # A museum.or.jp link already present means this page was
            # processed before.
            pattern_im = re.compile( ur'http:\/\/www.museum.or.jp' )
            match_im = pattern_im.search( match.group(1) )
            if match_im:
                pywikibot.output(u"Page %s is already done; skipping."
                                 % page.title(asLink=True))
                return
            # Keep the existing remark and append ours after a line break.
            comment += "<br />\n" + match.group(1).strip()
        text = re.sub( pattern, ur'|備考=%s\n' % comment, text )
    else:
        # No 備考 parameter: insert one right after the template opening.
        tmpl_pattern = re.compile( ur'{{施設(.+?)}}', re.DOTALL )
        text = re.sub( tmpl_pattern, ur'{{施設\n|備考=%s\1}}'
                       % self.im_comment, text )
    # only save if something was changed
    if text != page.get():
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> %s <<<" % page.title())
        # show what was changed
        pywikibot.showDiff(page.get(), text)
        if not self.dry:
            if not self.always:
                choice = pywikibot.inputChoice(
                    u'Do you want to accept these changes?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
            else:
                choice = 'y'
            if choice == 'y':
                try:
                    # Save the page
                    page.put(text)
                except pywikibot.LockedPage:
                    pywikibot.output(u"Page %s is locked; skipping."
                                     % page.title(asLink=True))
                except pywikibot.EditConflict:
                    pywikibot.output(
                        u'Skipping %s because of edit conflict'
                        % (page.title()))
                except pywikibot.SpamfilterError, error:
                    pywikibot.output(
                        u'Cannot change %s because of spam blacklist entry %s'
                        % (page.title(), error.url))
def main():
    """Scrape crude-oil prices (WTI/CL and Brent/BZ) from nymex.com and
    update the it.wikinews "Dati petrolio" auto template."""
    args = wikipedia.handleArgs()
    all = False
    for currentArgument in args:
        if currentArgument.startswith("-always"):
            all = True
    templateFile = codecs.open("modello_wikinews_oil.txt", "r", "utf-8")
    modelloVoce = templateFile.read()  # Read the page template
    templateFile.close()
    now = datetime.datetime.utcnow()
    urlo = "http://www.nymex.com/index.aspx"
    wikipedia.output(u'Prendo la pagina dal server...')
    try:
        htmlText = pageText(urlo)
    except urllib2.HTTPError:
        # Retry once after 10 seconds before giving up.
        try:
            wikipedia.output(
                u"Errore del server. Aspetto 10 secondi... " +
                time.strftime("%d %b %Y %H:%M:%S (UTC)", time.gmtime()))
            time.sleep(10)
            htmlText = pageText(urlo)
        except urllib2.HTTPError:
            wikipedia.output(u"Errore del server. Chiudo.")
            return
    # Extract last price and change for the CL (WTI) and BZ (Brent)
    # contracts from the fetched HTML.
    prezzoCL = re.search("<span id=\"LastCL\".*?>(.*?)</span>",
                         htmlText).group(1)
    changeCL = re.search(
        "<span id=\"ChangeCL\".*?>(<font.*?>)?(.*?)(</font>)?</span>",
        htmlText).group(2)
    prezzoBZ = re.search("<span id=\"LastBZ\".*?>(.*?)</span>",
                         htmlText).group(1)
    changeBZ = re.search(
        "<span id=\"ChangeBZ\".*?>(<font.*?>)?(.*?)(</font>)?</span>",
        htmlText).group(2)
    # Placeholders to substitute in the template
    elencoSostituzioni = {
        '#CL_curr': prezzoCL,
        '#CL_diff': changeCL,
        '#BZ_curr': prezzoBZ,
        '#BZ_diff': changeBZ,
    }
    nuovoTesto = massiveReplace(elencoSostituzioni, modelloVoce)
    #nuovoTesto = re.sub('\|- - - -', '|N.D.', nuovoTesto)  # For when the data are unavailable
    page = wikipedia.Page(wikipedia.getSite(code='it', fam='wikinews'),
                          "Template:Dati petrolio/Auto")
    if page.exists():
        oldtext = page.get()
    else:
        oldtext = ""
    wikipedia.showDiff(oldtext, nuovoTesto)
    if not all:
        choice = wikipedia.inputChoice(u"Modificare?", ['Yes', 'No', 'All'],
                                       ['y', 'N', 'a'], 'N')
    else:
        choice = 'y'
    if choice in ['A', 'a']:
        # "All": stop asking from now on.
        all = True
        choice = 'y'
    if choice in ['Y', 'y']:
        page.put(nuovoTesto, u"Bot: Aggiorno prezzi petrolio")
def main():
    """Scrape the Italian TV schedule (morning/afternoon/evening) from
    corriere.it and update the it.wikinews "ProgrammiTV" template."""
    args = wikipedia.handleArgs()
    all = False
    for currentArgument in args:
        if currentArgument.startswith("-always"):
            all = True
    templateFile = codecs.open("modello_palinsesto.txt", "r", "utf-8")
    modelloVoce = templateFile.read()  # Read the page template
    templateFile.close()
    now = datetime.datetime.utcnow()
    # Time slots: morning, afternoon, evening.
    fasceOrarie = ['mattina', 'pomeriggio', 'sera']
    pagineHtml = {}
    elencoSostituzioni = {}
    urlBase = "http://city.corriere.it/tv/tv.php?fascia="
    for i in fasceOrarie:
        urlo = urlBase + i
        print urlo
        wikipedia.output(u'Prendo la pagina dal server...')
        try:
            pagineHtml[i] = pageText(urlo)
        except urllib2.HTTPError:
            # Retry once after 10 seconds before giving up.
            try:
                wikipedia.output(
                    u"Errore del server. Aspetto 10 secondi... " +
                    time.strftime("%d %b %Y %H:%M:%S (UTC)", time.gmtime()))
                time.sleep(10)
                pagineHtml[i] = pageText(urlo)
            except urllib2.HTTPError:
                wikipedia.output(u"Errore del server. Chiudo.")
                return
        # One placeholder per channel and time slot.
        elencoSostituzioni['#rai1-' + i] = getProgramList('RAI 1',
                                                          pagineHtml[i])
        elencoSostituzioni['#rai2-' + i] = getProgramList('RAI 2',
                                                          pagineHtml[i])
        elencoSostituzioni['#rai3-' + i] = getProgramList('RAI 3',
                                                          pagineHtml[i])
        elencoSostituzioni['#rete4-' + i] = getProgramList('RETE 4',
                                                           pagineHtml[i])
        elencoSostituzioni['#canale5-' + i] = getProgramList('CANALE 5',
                                                             pagineHtml[i])
        elencoSostituzioni['#italia1-' + i] = getProgramList('ITALIA 1',
                                                             pagineHtml[i])
        elencoSostituzioni['#la7-' + i] = getProgramList('LA7',
                                                         pagineHtml[i])
    nuovoTesto = massiveReplace(elencoSostituzioni, modelloVoce)
    page = wikipedia.Page(wikipedia.getSite(code='it', fam='wikinews'),
                          "Template:ProgrammiTV")
    if page.exists():
        oldtext = page.get()
    else:
        oldtext = ""
    wikipedia.showDiff(oldtext, nuovoTesto)
    if not all:
        choice = wikipedia.inputChoice(u"Modificare?", ['Yes', 'No', 'All'],
                                       ['y', 'N', 'a'], 'N')
    else:
        choice = 'y'
    if choice in ['A', 'a']:
        # "All": stop asking from now on.
        all = True
        choice = 'y'
    if choice in ['Y', 'y']:
        page.put(nuovoTesto, u"Bot: Aggiorno palinsesto TV")
def save(self, text, page, comment, minorEdit=True, botflag=True):
    """Show a diff and save ``text`` to ``page`` after confirmation.

    Choosing 'a' (Always) requires a second confirmation before enabling
    always-save mode.  Returns True when the page was saved without
    error, False otherwise.
    """
    # only save if something was changed
    if text != page.get():
        # show what was changed
        pywikibot.showDiff(page.get(), text)
        pywikibot.output(u'Comment: %s' % comment)
        if not self.dry:
            if not self.always:
                confirm = 'y'
                # Loop until the user gives a definitive answer; 'a' asks
                # for an extra confirmation before enabling always mode.
                while True:
                    choice = pywikibot.inputChoice(
                        u'Do you want to accept these changes?',
                        ['Yes', 'No', 'Always'], ['y', 'N', 'a'], 'N')
                    if choice == 'a':
                        confirm = pywikibot.inputChoice(u"""\
This should be used if and only if you are sure that your links are correct! Are you sure?""",
                                                        ['Yes', 'No'],
                                                        ['y', 'n'], 'n')
                        if confirm == 'y':
                            self.always = True
                            break
                    else:
                        break
            # Short-circuit keeps "choice" from being read in always mode.
            if self.always or choice == 'y':
                try:
                    # Save the page
                    page.put(text, comment=comment, minorEdit=minorEdit,
                             botflag=botflag)
                except pywikibot.LockedPage:
                    pywikibot.output(u"Page %s is locked; skipping."
                                     % page.title(asLink=True))
                except pywikibot.EditConflict:
                    pywikibot.output(
                        u'Skipping %s because of edit conflict'
                        % (page.title()))
                except pywikibot.SpamfilterError as error:
                    pywikibot.output(
                        u'Cannot change %s because of spam blacklist entry '
                        u'%s' % (page.title(), error.url))
                else:
                    return True
    return False
def treat(self, page):
    """
    Loads the given page, does some changes, and saves it.

    Near-duplicate of the spaced-style treat() above: updates the 備考
    ("remarks") field of the {{施設}} (facility) infobox with
    self.im_comment, inserting the field when it is missing.
    """
    text = self.load(page)
    if not text:
        return
    # Capture the value of the 備考 template parameter.
    pattern = re.compile(ur'\|\s*備考\s*=([^\n]*\n)')
    match = pattern.search(text)
    #print match.group(1)
    if match:
        comment = self.im_comment
        if len(match.group(1).strip()) > 0:
            # A museum.or.jp link already present means this page was
            # processed before.
            pattern_im = re.compile(ur'http:\/\/www.museum.or.jp')
            match_im = pattern_im.search(match.group(1))
            if match_im:
                pywikibot.output(u"Page %s is already done; skipping."
                                 % page.title(asLink=True))
                return
            # Keep the existing remark and append ours after a line break.
            comment += "<br />\n" + match.group(1).strip()
        text = re.sub(pattern, ur'|備考=%s\n' % comment, text)
    else:
        # No 備考 parameter: insert one right after the template opening.
        tmpl_pattern = re.compile(ur'{{施設(.+?)}}', re.DOTALL)
        text = re.sub(tmpl_pattern, ur'{{施設\n|備考=%s\1}}'
                      % self.im_comment, text)
    # only save if something was changed
    if text != page.get():
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> %s <<<" % page.title())
        # show what was changed
        pywikibot.showDiff(page.get(), text)
        if not self.dry:
            if not self.always:
                choice = pywikibot.inputChoice(
                    u'Do you want to accept these changes?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
            else:
                choice = 'y'
            if choice == 'y':
                try:
                    # Save the page
                    page.put(text)
                except pywikibot.LockedPage:
                    pywikibot.output(u"Page %s is locked; skipping."
                                     % page.title(asLink=True))
                except pywikibot.EditConflict:
                    pywikibot.output(
                        u'Skipping %s because of edit conflict'
                        % (page.title()))
                except pywikibot.SpamfilterError, error:
                    pywikibot.output(
                        u'Cannot change %s because of spam blacklist entry %s'
                        % (page.title(), error.url))
def run(self):
    """Starts the robot's action.

    Iterates over the candidate-deletion generator, shows each page
    (truncated to the first 50 lines when longer than 75) and asks the
    operator to delete, skip, update progress, or quit.  When a pass
    finds no pages it waits 30 seconds and refreshes the category.
    """
    keepGoing = True
    startFromBeginning = True
    while keepGoing:
        if startFromBeginning:
            # Forget any saved resume point and restart from the top.
            self.savedProgress = None
        self.refreshGenerator()
        count = 0
        for page in self.preloadingGen:
            try:
                pageText = page.get(get_redirect = True).split("\n")
                count += 1
            except wikipedia.NoPage:
                wikipedia.output(
                    u'Page %s does not exist or has already been deleted, skipping.'
                    % page.aslink())
                continue
            # Show the title of the page we're working on.
            # Highlight the title in purple.
            wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                             % page.title())
            wikipedia.output(u'- - - - - - - - - ')
            if len(pageText) > 75:
                wikipedia.output(
                    'The page detail is too many lines, only output first 50 lines:')
                wikipedia.output(u'- - - - - - - - - ')
                wikipedia.output(u'\n'.join(pageText[:50]))
            else:
                wikipedia.output(u'\n'.join(pageText))
            wikipedia.output(u'- - - - - - - - - ')
            choice = wikipedia.inputChoice(u'Input action?',
                                           ['delete', 'skip', 'update',
                                            'quit'],
                                           ['d', 'S', 'u', 'q'], 'S')
            if choice == 'q':
                keepGoing = False
                break
            elif choice == 'u':
                # Remember where we are and rebuild the generator from here.
                wikipedia.output(u'Updating from CSD category.')
                self.savedProgress = page.title()
                startFromBeginning = False
                break
            elif choice == 'd':
                reason = self.getReasonForDeletion(page)
                wikipedia.output(
                    u'The chosen reason is: \03{lightred}%s\03{default}'
                    % reason)
                page.delete(reason, prompt = False)
            else:
                wikipedia.output(u'Skipping page %s' % page.title())
            startFromBeginning = True
        if count == 0:
            if startFromBeginning:
                # Nothing left to process: poll again in 30 seconds.
                wikipedia.output(
                    u'There are no pages to delete.\nWaiting for 30 seconds or press Ctrl+C to quit...')
                try:
                    time.sleep(30)
                except KeyboardInterrupt:
                    keepGoing = False
            else:
                startFromBeginning = True
    wikipedia.output(u'Quitting program.')
def main():
    """Command line entry point: build a page generator and run BirthCatBot."""
    # HACK: This can be removed when pywikipedia bug 3315395 has been fixed
    safetyLock = 'birthcat-unlock.dat'
    if not os.path.exists(safetyLock):
        answer = pywikibot.inputChoice(
            u'Have you patched textlib.py in pywikipedia?',
            ['Yes', 'No'], ['y', 'N'], 'N')
        if answer != 'y':
            return False
        # Remember the confirmation so we do not ask again.
        open(safetyLock, 'w').close()
    # END OF HACK
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()
    # Collected words of a single page title given on the command line.
    titleParts = []
    # dry: show changes only; auto: run without prompting.
    dry = False
    auto = False
    for arg in pywikibot.handleArgs():
        if arg.startswith("-dry"):
            dry = True
        elif arg.startswith("-auto"):
            auto = True
        elif not genFactory.handleArg(arg):
            # Not a standard generator argument: part of a page title.
            titleParts.append(arg)
    gen = None
    if titleParts:
        # We will only work on a single page.
        single = pywikibot.Page(pywikibot.getSite(), ' '.join(titleParts))
        gen = iter([single])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        pywikibot.showHelp()
        return
    # The preloading generator is responsible for downloading multiple
    # pages from the wiki simultaneously.
    bot = BirthCatBot(pagegenerators.PreloadingGenerator(gen), auto, dry)
    bot.run()
def Import(self, target, project='w', crono='1', namespace='',
           prompt=True):
    """Import the page from the wiki. Requires administrator status.

    If prompt is True, asks the user if he wants to import the page.
    Returns True on success, False on failure, None when not attempted.
    """
    if project == 'w':
        site = pywikibot.getSite(fam='wikipedia')
    elif project == 'b':
        site = pywikibot.getSite(fam='wikibooks')
    elif project == 'wikt':
        site = pywikibot.getSite(fam='wiktionary')
    elif project == 's':
        site = pywikibot.getSite(fam='wikisource')
    elif project == 'q':
        site = pywikibot.getSite(fam='wikiquote')
    else:
        site = pywikibot.getSite()
    # Fixing the crono value...
    if crono == True:
        crono = '1'
    elif crono == False:
        crono = '0'
    # Fixing namespace's value.
    if namespace == '0':
        # BUG FIX: this line was the no-op comparison "namespace == ''";
        # the intent is to normalize '0' to the empty string.
        namespace = ''
    answer = 'y'
    if prompt:
        answer = pywikibot.inputChoice(u'Do you want to import %s?' % target,
                                       ['Yes', 'No'], ['y', 'N'], 'N')
    if answer == 'y':
        host = self.site().hostname()
        address = self.site().path() + '?title=%s&action=submit' % self.urlname()
        # You need to be a sysop for the import.
        self.site().forceLogin(sysop=True)
        # Getting the token.
        token = self.site().getToken(self, sysop=True)
        # Defining the predata.
        predata = {
            'action': 'submit',
            'source': 'interwiki',
            # from what project do you want to import the page?
            'interwiki': project,
            # What is the page that you want to import?
            'frompage': target,
            # The entire history... or not?
            'interwikiHistory': crono,
            # What namespace do you want?
            'namespace': '',
        }
        response, data = self.site().postForm(address, predata, sysop=True)
        if data:
            pywikibot.output(u'Page imported, checking...')
            if pywikibot.Page(self.importsite, target).exists():
                pywikibot.output(u'Import success!')
                return True
            else:
                pywikibot.output(u'Import failed!')
                return False
def categories(self):
    """Add {{commonscat}} to category pages with a same-named Commons category.

    For each generated page, look up the identically named category on
    the Commons image repository; if it exists and the page does not
    already carry a {{commons}} or {{sisterlinks}} template, append
    {{commonscat|<name>}} and save after optional confirmation.
    """
    for page in self.generator:
        try:
            pywikibot.output(u'\n>>>> %s <<<<' % page.title())
            commons = pywikibot.getSite().image_repository()
            commonsCategory = catlib.Category(commons,
                                              'Category:%s' % page.title())
            try:
                # Raises NoPage when the Commons category does not exist.
                getcommonscat = commonsCategory.get(get_redirect=True)
                commonsCategoryTitle = commonsCategory.title()
                categoryname = commonsCategoryTitle.split('Category:', 1)[1]
                if page.title() == categoryname:
                    oldText = page.get()
                    text = oldText
                    # for commonscat template
                    findTemplate = re.compile(ur'\{\{[Cc]ommons')
                    s = findTemplate.search(text)
                    findTemplate2 = re.compile(ur'\{\{[Ss]isterlinks')
                    s2 = findTemplate2.search(text)
                    if s or s2:
                        pywikibot.output(u'** Already done.')
                    else:
                        # Append the template, then re-normalise the
                        # category links so it ends up above them.
                        text = pywikibot.replaceCategoryLinks(
                            text + u'{{commonscat|%s}}' % categoryname,
                            page.categories())
                        if oldText != text:
                            pywikibot.showDiff(oldText, text)
                            if not self.acceptall:
                                choice = pywikibot.inputChoice(
                                    u'Do you want to accept these changes?',
                                    ['Yes', 'No', 'All'],
                                    ['y', 'N', 'a'], 'N')
                                if choice == 'a':
                                    self.acceptall = True
                            if self.acceptall or choice == 'y':
                                try:
                                    msg = pywikibot.translate(
                                        pywikibot.getSite(), comment2)
                                    page.put(text, msg)
                                except pywikibot.EditConflict:
                                    pywikibot.output(
                                        u'Skipping %s because of edit '
                                        u'conflict' % (page.title()))
            except pywikibot.NoPage:
                pywikibot.output(u'Category does not exist in Commons!')
        except pywikibot.NoPage:
            pywikibot.output(u'Page %s does not exist' % page.title())
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'Page %s is a redirect; skipping.'
                             % page.title())
        except pywikibot.LockedPage:
            pywikibot.output(u'Page %s is locked' % page.title())
def categories(self):
    """Add {{commonscat}} to category pages with a same-named Commons category.

    Older wikipedia.py variant of the method above: for each generated
    page, look up the identically named Commons category; if it exists
    and the page lacks a {{commons}} or {{sisterlinks}} template, append
    {{commonscat|<name>}} and save after optional confirmation.
    """
    for page in self.generator:
        try:
            wikipedia.output(u'\n>>>> %s <<<<' % page.title())
            getCommons = wikipedia.getSite('commons', 'commons')
            commonsCategory = catlib.Category(getCommons,
                                              'Category:%s' % page.title())
            try:
                # Raises NoPage when the Commons category does not exist.
                getcommonscat = commonsCategory.get(get_redirect=True)
                commonsCategoryTitle = commonsCategory.title()
                categoryname = commonsCategoryTitle.split('Category:', 1)[1]
                if page.title() == categoryname:
                    oldText = page.get()
                    text = oldText
                    # for commonscat template
                    findTemplate = re.compile(ur'\{\{[Cc]ommons')
                    s = findTemplate.search(text)
                    findTemplate2 = re.compile(ur'\{\{[Ss]isterlinks')
                    s2 = findTemplate2.search(text)
                    if s or s2:
                        wikipedia.output(u'** Already done.')
                    else:
                        # Append the template, then re-normalise the
                        # category links so it ends up above them.
                        text = wikipedia.replaceCategoryLinks(
                            text + u'{{commonscat|%s}}' % categoryname,
                            page.categories())
                        if oldText != text:
                            wikipedia.showDiff(oldText, text)
                            if not self.acceptall:
                                choice = wikipedia.inputChoice(
                                    u'Do you want to accept these changes?',
                                    ['Yes', 'No', 'All'],
                                    ['y', 'N', 'a'], 'N')
                                if choice == 'a':
                                    self.acceptall = True
                            if self.acceptall or choice == 'y':
                                try:
                                    msg = wikipedia.translate(
                                        wikipedia.getSite(), comment2)
                                    page.put(text, msg)
                                except wikipedia.EditConflict:
                                    wikipedia.output(
                                        u'Skipping %s because of edit conflict'
                                        % (page.title()))
            except wikipedia.NoPage:
                wikipedia.output(u'Category does not exist in Commons!')
        except wikipedia.NoPage:
            wikipedia.output(u'Page %s does not exist?!' % page.title())
        except wikipedia.IsRedirectPage:
            wikipedia.output(u'Page %s is a redirect; skipping.'
                             % page.title())
        except wikipedia.LockedPage:
            wikipedia.output(u'Page %s is locked?!' % page.title())
def reportBadAccount(self, name=None, final=False):
    """Queue *name* as a suspicious username and flush the queue to the
    per-wiki report page.

    If *name* is given, it is appended to self._BAQueue, optionally
    after an interactive confirmation (controlled by globalvar.confirm).
    Once the queue holds at least globalvar.dumpToLog entries, or when
    *final* is True, every queued name not already present on the report
    page is appended to it in a single edit and the queue is cleared.
    Returns True when nothing was flushed.
    """
    # Queue process
    if name:
        if globalvar.confirm:
            answer = wikipedia.inputChoice(
                u"%s may have an unwanted username, do you want to report this user?"
                % name, ["Yes", "No", "All"], ["y", "N", "a"], "N")
            if answer in ["a", "all"]:
                # "All" answers once, then stops asking for later names.
                answer = "y"
                globalvar.confirm = False
        else:
            answer = "y"
        if answer.lower() in ["yes", "y"] or not globalvar.confirm:
            showStatus()
            wikipedia.output(
                u"%s is possibly an unwanted username. It will be reported."
                % name)
            if hasattr(self, "_BAQueue"):
                self._BAQueue.append(name)
            else:
                self._BAQueue = [name]
    if len(self._BAQueue) >= globalvar.dumpToLog or final:
        rep_text = ""
        # name in queue is max, put detail to report page
        wikipedia.output("Updating badname accounts to report page...")
        rep_page = wikipedia.Page(self.site,
                                  wikipedia.translate(self.site, report_page))
        if rep_page.exists():
            text_get = rep_page.get()
        else:
            text_get = u"This is a report page for the Bad-username, please translate me. --~~~"
        pos = 0
        # The talk page includes "_" between the two names, in this way
        # i replace them to " ".
        for usrna in self._BAQueue:
            username = wikipedia.url2link(usrna, self.site, self.site)
            n = re.compile(re.escape(username), re.UNICODE)
            y = n.search(text_get, pos)
            if y:
                wikipedia.output(u"%s is already in the report page."
                                 % username)
            else:
                # Adding the log.
                rep_text += wikipedia.translate(self.site,
                                                report_text) % username
                if self.site.lang == "it":
                    rep_text = "%s%s}}" % (rep_text, self.bname[username])
        com = wikipedia.translate(self.site, comment)
        if rep_text != "":
            rep_page.put(text_get + rep_text, comment=com, minorEdit=True)
            showStatus(5)
            wikipedia.output(u"Reported")
        # BUG FIX: reset the attribute actually used as the queue.  The
        # original assigned to self.BAQueue (no underscore), so
        # self._BAQueue was never cleared and every flush re-reported
        # all previously queued names.
        self._BAQueue = []
    else:
        return True
def choiceProtectionLevel(operation, default):
    """Interactively ask for a protection level for *operation*.

    *default* is an entry of the module-level protectionLevels list;
    only its first character is used as the prompt default.  Returns the
    first protection level whose name starts with the chosen character,
    or None if nothing matches (cannot happen while the shortcut list is
    derived from protectionLevels itself).
    """
    default = default[0]
    # One-character shortcut per level, derived from the level names.
    firstChar = [level[0] for level in protectionLevels]
    # Prompt wording fixed: "Choice a" -> "Choose a".
    choiceChar = wikipedia.inputChoice('Choose a protection level to %s:'
                                       % operation,
                                       protectionLevels, firstChar,
                                       default=default)
    for level in protectionLevels:
        if level.startswith(choiceChar):
            return level
def open(self, talkPage):
    """Open *talkPage*'s edit URL in the configured Chrome browser.

    Every 10th call pauses with an interactive prompt so the user can
    catch up with the already-opened tabs; the answer itself is
    deliberately ignored.
    """
    if self.count == 10:
        # Prompt used purely as a pause; the previously unused `choice`
        # binding was dropped.
        pywikibot.inputChoice("This is a wait", ['Yes', 'No'], ['y', 'N'],
                              'N')
        self.count = 0
    self.count += 1
    # NOTE(review): Popen is given a single command string, not an
    # argument list — this only works reliably on Windows.  Kept as-is
    # to preserve behaviour; the unused `Chrome` binding was dropped.
    subprocess.Popen(
        self.chrome + ' ' +
        "https://secure.wikimedia.org/wikipedia/en/wiki/" +
        talkPage.title().replace(" ", "_").encode('utf-8', 'replace') +
        "?action=edit")
def main():
    """Entry point: build a page generator from the command line
    arguments and run BirthCatBot over it.

    Recognised private options: -dry (show changes without saving) and
    -auto (autonomous mode); everything else is handed to the standard
    pagegenerators argument factory, and leftover arguments are joined
    into a single page title.  Returns False if the safety lock check
    is declined.
    """
    # HACK: This can be removed when pywikipedia bug 3315395 has been fixed
    safetyLock = "birthcat-unlock.dat"
    if not os.path.exists(safetyLock):
        choice = pywikibot.inputChoice(
            u"Have you patched textlib.py in pywikipedia?",
            ["Yes", "No"], ["y", "N"], "N")
        if choice == "y":
            # Create the empty marker file; `with` closes the handle
            # even on failure (the original leaked it via open().close()
            # only on success).
            with open(safetyLock, "w"):
                pass
        else:
            return False
    # END OF HACK
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()
    # The generator gives the pages that should be worked upon.
    gen = None
    # This temporary array is used to read the page title if one single
    # page to work on is specified by the arguments.
    pageTitleParts = []
    # If dry is True, doesn't do any real changes, but only show
    # what would have been changed.
    dry = False
    # If auto is True, run in autonomous mode.
    auto = False
    # Parse command line arguments
    for arg in pywikibot.handleArgs():
        if arg.startswith("-dry"):
            dry = True
        elif arg.startswith("-auto"):
            auto = True
        else:
            # check if a standard argument like
            # -start:XYZ or -ref:Asdf was given.
            if not genFactory.handleArg(arg):
                pageTitleParts.append(arg)
    if pageTitleParts:
        # We will only work on a single page.
        pageTitle = " ".join(pageTitleParts)
        page = pywikibot.Page(pywikibot.getSite(), pageTitle)
        gen = iter([page])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if gen:
        # The preloading generator is responsible for downloading multiple
        # pages from the wiki simultaneously.
        gen = pagegenerators.PreloadingGenerator(gen)
        bot = BirthCatBot(gen, auto, dry)
        bot.run()
    else:
        pywikibot.showHelp()
def showQuest(site, page):
    """Ask whether to open *page* and, on request, show it in a web
    browser (with redirects suppressed) or in the GUI text editor.
    """
    quest = pywikibot.inputChoice(u'Do you want to open the page?',
                                  ['with browser', 'with gui', 'no'],
                                  ['b', 'g', 'n'], 'n')
    pathWiki = site.family.nicepath(site.lang)
    # BUG FIX: build the URL from the *site* argument.  The original
    # called pywikibot.getSite().hostname(), ignoring the parameter and
    # producing a wrong host whenever page belongs to a non-default site.
    url = 'http://%s%s%s?&redirect=no' % (site.hostname(), pathWiki,
                                          page.urlname())
    if quest == 'b':
        webbrowser.open(url)
    elif quest == 'g':
        import editarticle
        editor = editarticle.TextEditor()
        # The edited text is discarded on purpose — the editor is only
        # opened so the user can inspect the page; the unused `text`
        # binding was dropped.
        editor.edit(page.get())
def pages(self):
    """Add {{commons}} to pages that have an identically named page on
    the Commons image repository.

    For every page from the generator the same-named Commons page is
    fetched; if it exists, the titles match, and the local page does not
    already carry a {{commonscat}} or {{sisterlinks}} template, a
    {{commons|<title>}} template is appended and saved after an
    interactive confirmation (skipped once self.acceptall is set).
    """
    for page in self.generator:
        try:
            pywikibot.output(u'\n>>>> %s <<<<' % page.title())
            commons = pywikibot.getSite().image_repository()
            commonspage = pywikibot.Page(commons, page.title())
            try:
                # Raises pywikibot.NoPage when the Commons page is
                # missing; the fetched text itself is unused.
                getcommons = commonspage.get(get_redirect=True)
                if page.title() == commonspage.title():
                    oldText = page.get()
                    text = oldText
                    # for commons template
                    findTemplate = re.compile(ur'\{\{[Cc]ommonscat')
                    s = findTemplate.search(text)
                    findTemplate2 = re.compile(ur'\{\{[Ss]isterlinks')
                    s2 = findTemplate2.search(text)
                    if s or s2:
                        pywikibot.output(u'** Already done.')
                    else:
                        # Append the template and let the framework
                        # normalise category link placement.
                        text = pywikibot.replaceCategoryLinks(
                            text + u'{{commons|%s}}' % commonspage.title(),
                            page.categories())
                        if oldText != text:
                            pywikibot.showDiff(oldText, text)
                            if not self.acceptall:
                                choice = pywikibot.inputChoice(
                                    u'Do you want to accept these changes?',
                                    ['Yes', 'No', 'All'],
                                    ['y', 'N', 'a'], 'N')
                                if choice == 'a':
                                    self.acceptall = True
                            if self.acceptall or choice == 'y':
                                try:
                                    msg = pywikibot.translate(
                                        pywikibot.getSite(), comment1)
                                    page.put(text, msg)
                                except pywikibot.EditConflict:
                                    pywikibot.output(
                                        u'Skipping %s because of edit '
                                        u'conflict' % (page.title()))
            except pywikibot.NoPage:
                pywikibot.output(u'Page does not exist in Commons!')
        except pywikibot.NoPage:
            pywikibot.output(u'Page %s does not exist?!' % page.title())
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'Page %s is a redirect; skipping.'
                             % page.title())
        except pywikibot.LockedPage:
            pywikibot.output(u'Page %s is locked?!' % page.title())
def main():
    """Entry point: add an Italian {{DEFAULTSORT}} to pages whose title
    starts with a definite/indefinite article (English or Italian).

    Recognises -always to skip per-page confirmation; all other
    arguments go to the pagegenerators factory.  Titles beginning with
    "I " always ask for confirmation, since "I" may be the Italian
    article or the English pronoun.
    """
    args = wikipedia.handleArgs()
    # Renamed from `all`, which shadowed the builtin.
    always = False
    genFactory = pagegenerators.GeneratorFactory()
    # BUG FIX: initialise `generator` so that running with no generator
    # argument raises the intended Exception below instead of an
    # UnboundLocalError.
    generator = None
    for currentArgument in args:
        if currentArgument.startswith("-always"):
            always = True
        else:
            generator = genFactory.handleArg(currentArgument)
    # Check if pages on which the bot should work are specified.
    if not generator:
        raise Exception(
            'You have to specify which pages the script has to work on!')
    # Main Loop
    for i in generator:
        # Doubt about "I", which can be the Italian definite article or
        # the English personal pronoun — those pages always ask.
        attenzioneIo = False
        titolo = i.title()
        wikipedia.output(">>>>> " + titolo + " <<<<<")
        nuovoTitolo = re.sub(
            "^(The |A |An |Il |Lo |La |I |Gli |Le |L'|Uno |Una |Un'|Un )([A-Z0-9].*)",
            r"{{DEFAULTSORT:\2, \1}}", titolo)
        if titolo == nuovoTitolo:
            wikipedia.output("Non c'è nessun articolo. Prossima pagina...")
            continue
        if re.search("^I ", titolo):
            attenzioneIo = True
        # Strip spaces and apostrophes left before the closing braces.
        nuovoTitolo = re.sub("[ ']\}\}", "}}", nuovoTitolo)
        try:
            oldtext = i.get()
        except wikipedia.IsRedirectPage:
            wikipedia.output(u"%s is a redirect, I'll ignore it." % i.title())
            continue
        if re.search("\{\{DEFAULTSORT:", oldtext):
            wikipedia.output("C'è già un DEFAULTSORT. Prossima pagina...")
            continue
        newtext = add_text(page=i, addText=nuovoTitolo, putText=False,
                           oldTextGiven=oldtext)[1]
        wikipedia.showDiff(oldtext, newtext)
        if not always or attenzioneIo:
            choice = wikipedia.inputChoice(u"Modificare?",
                                           ['Yes', 'No', 'All'],
                                           ['y', 'N', 'a'], 'N')
        else:
            choice = 'y'
        if choice in ['A', 'a']:
            always = True
            choice = 'y'
        if choice in ['Y', 'y']:
            i.put_async(newtext, comment="Aggiungo: " + nuovoTitolo)
def run(self): # Set the edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), self.msg)) for page in self.generator: self.treat(page) overviewPage = wikipedia.Page(wikipedia.getSite(), u"VEIDs") text = "<!-- Note: automatically generated by robot-generate_openvz_velist.py. -->\n" text += "[VEID Naming Conventions]\n\n" text += "=== Legacy IDs ===\n" keys = self.veidlist.keys() keys.sort() lastid = "" for id in keys: pagename = self.veidlist[id] id = str(id) companyid = id[0:2] if (not lastid.startswith(companyid)) and len(id) > 4: text += "=== " + companyid + " - " + self.companies[int( companyid)] + " ===\n" text += "* [[" + pagename + "|'''" + id + "''']]''':''' [[" + pagename + "]]\n" if len(id) > 4: lastid = id text += "[[Category:VE]]" # only save if something was changed if text == overviewPage.get(): return # Show the title of the page we're working on. # Highlight the title in purple. wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % overviewPage.title()) # show what was changed wikipedia.showDiff(overviewPage.get(), text) choice = 'y' if self.debug: choice = wikipedia.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') if choice == 'y': try: # Save the page overviewPage.put(text) except wikipedia.LockedPage: wikipedia.output(u"Page %s is locked; skipping." % overviewPage.aslink()) except wikipedia.EditConflict: wikipedia.output(u'Skipping %s because of edit conflict' % (overviewPage.title())) except wikipedia.SpamfilterError, error: wikipedia.output( u'Cannot change %s because of spam blacklist entry %s' % (overviewPage.title(), error.url))