def __init__(self, page, filename, summary, dry, always):
    self.page = pywikibot.Page(pywikibot.getSite(), page)
    self.filename = filename
    self.summary = summary
    if not self.summary:
        self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
    pywikibot.setAction(self.summary)
def run(self):
    comment = wikipedia.translate(wikipedia.getSite(), msg)
    wikipedia.setAction(comment)
    for page in self.generator:
        if self.done:
            break
        self.treat(page)
def run(self):
    # Set the edit summary message
    pipes = ', '.join(self.filters)
    pywikibot.setAction(
        pywikibot.translate(pywikibot.getSite(), self.msg) % pipes)
    for page in self.generator:
        self.treat(page)
def run(self):
    comment = pywikibot.translate(pywikibot.getSite(), msg)
    pywikibot.setAction(comment)
    for page in self.generator:
        if self.done:
            break
        self.treat(page)
def run(self):
    for page in self.generator:
        if page.isRedirectPage():
            page = page.getRedirectTarget()
        page_t = page.title()
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        wikipedia.output(u"\n>>> \03{lightpurple}%s\03{default} <<<" % page_t)
        page_cap = wikipedia.Page(wikipedia.getSite(),
                                  page_t.title().capitalize())
        if not page_cap.exists():
            wikipedia.output(u'%s doesn\'t exist' % page_cap.title())
            if not self.acceptall:
                choice = wikipedia.inputChoice(
                    u'Do you want to create a redirect?',
                    ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
                if choice == 'a':
                    self.acceptall = True
            if self.acceptall or choice == 'y':
                try:
                    wikipedia.setAction(
                        wikipedia.translate(wikipedia.getSite(), msg) % page_t)
                    page_cap.put(u"#REDIRECT [[%s]]" % page_t)
                    print
                except:
                    wikipedia.output(
                        u"An error occurred. Retrying in 15 seconds...")
                    time.sleep(15)
                    continue
        else:
            wikipedia.output(u'%s already exists, skipping...\n'
                             % page_t.title())
def process_article(page):
    EditMsg = "Robot: Substituting {{[[Template:RS500|RS500]]}} and common fixes"
    wikipedia.setAction(EditMsg)
    try:
        wikitext = page.get()
    except wikipedia.NoPage:
        return
    except wikipedia.IsRedirectPage:
        return
    # Fix Casing (Reduces the number of possible expressions)
    # Common fixes
    # wikitext = commonfixes.fix(wikitext)
    # State point. Count any changes as needing an update if they're after this line
    state0 = wikitext
    # Substitute the RS500 template
    wikitext = re.compile(r'\{\{\s*rs500(.*?)\}\}', re.IGNORECASE).sub(
        r'{{subst:RS500\1}}', wikitext)
    wikipedia.showDiff(page.get(), wikitext)
    if wikitext != state0:
        try:
            wikipedia.output(u'WRITE: Adding %s bytes.'
                             % str(len(wikitext) - len(page.get())))
            # print 'Waiting 2.5 seconds'
            # time.sleep(2.5)
            page.put(wikitext)
        except TypeError:
            print 'Skipping TypeError'
    return
def run(self):
    comment = i18n.twtranslate(self.site, 'noreferences-add-tag')
    pywikibot.setAction(comment)
    for page in self.generator:
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        try:
            text = page.get()
        except pywikibot.NoPage:
            pywikibot.output(u"Page %s does not exist?!"
                             % page.title(asLink=True))
            continue
        except pywikibot.IsRedirectPage:
            pywikibot.output(u"Page %s is a redirect; skipping."
                             % page.title(asLink=True))
            continue
        except pywikibot.LockedPage:
            pywikibot.output(u"Page %s is locked?!"
                             % page.title(asLink=True))
            continue
        if pywikibot.getSite().sitename() == 'wikipedia:en' and page.isIpEdit():
            pywikibot.output(
                u"Page %s is edited by IP. Possible vandalized"
                % page.title(asLink=True))
            continue
        if self.lacksReferences(text):
            newText = self.addReferences(text)
            self.save(page, newText)
def process_article(page, tag):
    EditMsg = "Robot: Tagging {{Film|%s}}" % tag
    wikipedia.setAction(EditMsg)
    try:
        wikitext = page.get()
    except wikipedia.NoPage:
        page.put("{{Film|%s}}" % tag)
        return
    except wikipedia.IsRedirectPage:
        return
    if re.search(tag, wikitext, re.I):
        print "Skipping " + str(page)
        return
    # Fix Casing (Reduces the number of possible expressions)
    wikitext = re.compile(r'\{\{\s*(template:|)film', re.IGNORECASE).sub(
        r'{{Film', wikitext)
    state0 = wikitext
    # Add tf parameter
    wikitext = re.compile(r'\{\{\s*film(.*?)\}\}', re.IGNORECASE).sub(
        r'{{Film\1|%s}}' % tag, wikitext)
    wikipedia.showDiff(state0, wikitext)
    if wikitext != state0:
        try:
            print 'Going to edit %s' % str(page)
            wikipedia.output(u'WRITE: Adding %s bytes.'
                             % str(len(wikitext) - len(state0)))
            page.put(wikitext)
        except KeyboardInterrupt:
            sys.exit()
def __init__(self):
    # Set up the wiki families involved
    self.naruto = wikipedia.getSite(code=u'en', fam=u'naruto')
    wikipedia.setAction(wikipedia.translate(self.naruto, msg))
    self.jutsuList = [
        u'List of Ninjutsu',
        u'List of Taijutsu',
        u'List of Genjutsu',
    ]
def setSummaryMessage(self, disambPage, new_targets=[], unlink=False, dn=False):
    # TODO: setSummaryMessage() in solve_disambiguation now has parameters
    # new_targets and unlink. Make use of these here.
    comment = pywikibot.translate(self.mysite, self.msg) % disambPage.title()
    pywikibot.setAction(comment)
def run(self):
    comment = pywikibot.translate(self.site, msg)
    pywikibot.setAction(comment)
    for page in self.generator:
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        try:
            text = page.get()
        except pywikibot.NoPage:
            pywikibot.output(u"Page %s does not exist?!"
                             % page.title(asLink=True))
            continue
        except pywikibot.IsRedirectPage:
            pywikibot.output(u"Page %s is a redirect; skipping."
                             % page.title(asLink=True))
            continue
        except pywikibot.LockedPage:
            pywikibot.output(u"Page %s is locked?!"
                             % page.title(asLink=True))
            continue
        if pywikibot.getSite().sitename() == 'wikipedia:en' and page.isIpEdit():
            pywikibot.output(
                u"Page %s is edited by IP. Possible vandalized"
                % page.title(asLink=True))
            continue
        if self.lacksReferences(text):
            newText = self.addReferences(text)
            self.save(page, newText)
def main():
    # Set up the wiki families involved
    anime = wikipedia.getSite(code=u'en', fam=u'anime')
    wikipedia.setAction(wikipedia.translate(anime, msg))
    siteList = []
    pageList = []
    # Get Project Wiki Listing
    wikiaIds = []
    page = wikipedia.Page(anime, u'Bots/Wiki', None, 4)  # 4 = Project namespace
    try:
        text = page.get()
        r = re.compile(u'^.*<!-- \|\|START\|\| -->\n?', re.UNICODE | re.DOTALL)
        text = re.sub(r, u'', text)
        r = re.compile(u'\n?<!-- \|\|END\|\| -->.*$', re.UNICODE | re.DOTALL)
        text = re.sub(r, u'', text)
        r = re.compile(u'\n', re.UNICODE | re.MULTILINE | re.DOTALL)
        wikilist = re.split(r, text)
        r = re.compile(u'^#|^\s*$|^\[', re.UNICODE | re.MULTILINE | re.DOTALL)
        for wiki in wikilist:
            if not re.match(r, wiki):
                wikiaIds.append(wiki)
    except wikipedia.NoPage:
        moreYears = False
    for wiki in wikiaIds:
        siteList.append(wikipedia.getSite(code=u'en', fam=wiki))
    # Get Page List
    page = wikipedia.Page(anime, u'Bots/CleanDelete/Pages', None, 4)  # 4 = Project namespace
    try:
        text = page.get()
        r = re.compile(u'^.*<!-- \|\|START\|\| -->\n?', re.UNICODE | re.DOTALL)
        text = re.sub(r, u'', text)
        r = re.compile(u'\n?<!-- \|\|END\|\| -->.*$', re.UNICODE | re.DOTALL)
        text = re.sub(r, u'', text)
        r = re.compile(u'\n', re.UNICODE | re.MULTILINE | re.DOTALL)
        pages = re.split(r, text)
        r = re.compile(u'^#|^\s*$', re.UNICODE | re.MULTILINE | re.DOTALL)
        for p in pages:
            if not re.match(r, p):
                pageList.append(p)
    except wikipedia.NoPage:
        moreYears = False
    for page in pageList:
        wikipedia.output(u'Doing Page %s' % page)
        for site in siteList:
            p = wikipedia.Page(site, page)
            if p.exists():
                wikipedia.output(u'Page %s exists on %s.'
                                 % (p.title(), site.family.name))
                wikipedia.output(u'Deleting %s' % p.title())
                p.delete(wikipedia.translate(anime, msg), True)
            else:
                wikipedia.output(
                    u'Page %s does not exist on %s, skipping page on site.'
                    % (p.title(), site.family.name))
def __init__(self):
    self.header = u'==' + __name__ + '-' + strftime(
        u'%Y-%m-%d@%H:%M:%S (%Z)', gmtime()) + u'=='
    self.coreWiki = wikipedia.getSite(code=u'en', fam=u'anime')
    self.logPage = wikipedia.Page(self.coreWiki, u'Bots/DefaultLog',
                                  None, 4)  # 4 = Project namespace
    wikipedia.setAction(
        wikipedia.translate(self.coreWiki, self.getSummaries()))
def __init__(self, page, filename, summary, overwrite):
    self.page = pywikibot.Page(pywikibot.getSite(), page)
    self.filename = filename
    self.summary = summary
    self.overwrite = overwrite
    if not self.summary:
        self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
    pywikibot.setAction(self.summary)
def main():
    summary_commandline, template, gen = None, None, None
    exceptions, PageTitles, namespaces = [], [], []
    cat = ''
    autoText, autoTitle = False, False
    genFactory = pagegenerators.GeneratorFactory()
    # Leave False to parse command-line arguments;
    # set True to be prompted for a page title instead.
    arg = False
    if arg == False:
        for arg in wikipedia.handleArgs():
            if arg == '-autotitle':
                autoTitle = True
            elif arg == '-autotext':
                autoText = True
            elif arg.startswith('-page:'):
                if len(arg) == 6:
                    PageTitles.append(wikipedia.input(
                        u'Which page do you want to change?'))
                else:
                    PageTitles.append(arg[6:])
            elif arg.startswith('-cat:'):
                if len(arg) == 5:
                    cat = wikipedia.input(
                        u'Which category do you want to change?')
                else:
                    cat = 'Category:' + arg[5:]
            elif arg.startswith('-template:'):
                if len(arg) == 10:
                    template.append(wikipedia.input(
                        u'Which template do you want to change?'))
                else:
                    template.append('Template:' + arg[10:])
            elif arg.startswith('-except:'):
                exceptions.append(arg[8:])
            elif arg.startswith('-namespace:'):
                namespaces.append(int(arg[11:]))
            elif arg.startswith('-ns:'):
                namespaces.append(int(arg[4:]))
            elif arg.startswith('-summary:'):
                wikipedia.setAction(arg[9:])
                summary_commandline = True
            else:
                generator = genFactory.handleArg(arg)
                if generator:
                    gen = generator
    else:
        PageTitles = [raw_input(u'Page:> ').decode('utf-8')]
    if cat != '':
        facatfalist = facatlist(cat)
        if facatfalist != False:
            run(facatfalist)
    if PageTitles:
        pages = [wikipedia.Page(faSite, PageTitle) for PageTitle in PageTitles]
        gen = iter(pages)
    if not gen:
        wikipedia.stopme()
        sys.exit()
    if namespaces != []:
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
    # number of pages to preload at the same time
    preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber=60)
    run(preloadingGen)
def main():
    yearnumber = 1998
    yearset1 = yearnumber - 1
    if yearnumber >= 2000:
        yearset2 = yearnumber - 2000
    else:
        yearset2 = yearnumber - 1900
    if yearset2 < 10:
        yearset2 = "0" + str(yearset2)
    yearsetfinal = str(yearset1) + "–" + str(yearset2)
    loopsat = True
    # loop
    for arg in wikipedia.handleArgs():
        if arg.startswith('-year'):
            if len(arg) == 5:
                yearnumber = int(wikipedia.input(
                    'What year do you want to start at?'))
            elif len(arg) > 5:
                yearnumber = int(arg[6:])
    while loopsat == True:
        # start loop
        if yearnumber >= 2011:
            return
        listpage = wikipedia.Page(site, "DFB Cup " + yearsetfinal)
        if listpage.exists() == True:
            wikipedia.setAction("Making redirects per [[Wikipedia:Bot requests/Archive 22#DFB_Cups|Botreq]]")
            wikipedia.output("> The target for [[" + str(yearnumber) + " DFB Cup Final]] exists")
            rd = "#REDIRECT [[DFB Cup " + yearsetfinal + "#Final]]"
            p1 = wikipedia.Page(site, str(yearnumber) + " DFB Cup Final")
            pagelist = [p1]
            for i in pagelist:
                if i.exists() == False:
                    wikipedia.output(">>Creating [[%s]]" % i.title())
                    i.put(rd)
                else:
                    wikipedia.output(">>[[en:%s]] already exists" % i.title())
        else:
            wikipedia.output(">DFB Cup " + yearsetfinal + " does not exist")
        # Redefine values
        yearnumber = yearnumber + 1
        yearset1 = yearnumber - 1
        yearset2 = yearnumber - 1900
        if yearnumber >= 2000:
            yearset2 = yearnumber - 2000
        else:
            yearset2 = yearnumber - 1900
        if yearset2 < 10:
            yearset2 = "0" + str(yearset2)
        yearsetfinal = str(yearset1) + "-" + str(yearset2)
        if '<div class="usermessage">' in text:
            # check talk page for messages
            wikipedia.output(u'NOTE: You have unread messages on %s' % self)
            wikipedia.crash()  # stop
    wikipedia.output("Done!")
    wikipage = "User:Legoktm/BOTFAQ/Code/redir.py"
    code = "redir.py"
    legoktm.upload(wikipage, code)
def treat(to_pl, fromsite):
    try:
        to_text = to_pl.get()
        interwikis = to_pl.interwiki()
    except wikipedia.IsRedirectPage:
        print "Can't work on redirect page."
        return
    except wikipedia.NoPage:
        print "Page not found."
        return
    from_pl = None
    for interwiki in interwikis:
        if interwiki.site() == fromsite:
            from_pl = interwiki
    if from_pl is None:
        print "Interwiki link to %s not found." % repr(fromsite)
        return
    from_text = from_pl.get()
    wikipedia.setAction(wikipedia.translate(mysite.lang, msg) + from_pl.aslink())
    # search start of table
    table = get_table(from_text)
    if not table:
        wikipedia.output(u"No table found in %s" % (from_pl.aslink()))
        return
    print_debug(u"Copying images")
    if copy_images:
        # extract image links from original table
        images = imagelinks(fromsite, table)
        for image in images:
            # Copy the image to the current wikipedia, copy the image
            # description page as well. Prompt the user so that he can
            # translate the filename.
            new_filename = lib_images.transfer_image(
                wikipedia.Page(fromsite, image), debug)
            # if the upload succeeded
            if new_filename:
                old_image_tag = wikipedia.Page(fromsite, image).title()
                new_image_tag = wikipedia.Page(
                    mysite, mysite.image_namespace() + ":" + new_filename).title()
                print_debug(u"Replacing " + old_image_tag + " with " + new_image_tag)
                # We want to replace "Image:My pic.jpg" as well as
                # "image:my_pic.jpg", so we need a regular expression.
                old_image_tag = old_image_tag.replace(" ", "[ \_]")
                old_image_tag = "[" + old_image_tag[0].upper() + old_image_tag[0].lower() + "]" + old_image_tag[1:]
                # todo: regex for first letter of filename, i.e. first letter after the colon
                rOld_image_tag = re.compile(old_image_tag)
                table = re.sub(old_image_tag, new_image_tag, table)
    translated_table = translator.translate(table, type, fromsite.lang,
                                            debug, mysite.lang)
    if not translated_table:
        print "Could not translate table."
        return
    print_debug(u"\n" + translated_table)
    # add the table to the top of the article, separated by a blank line
    to_text = translated_table + "\n\n" + to_text
    if not debug:
        # save changes on Wikipedia
        to_pl.put(to_text, minorEdit='0')
def main():
    summary_commandline, gen, template = None, None, None
    namespaces, PageTitles, exceptions = [], [], []
    encat = ''
    autoText, autoTitle = False, False
    recentcat, newcat = False, False
    genFactory = pagegenerators.GeneratorFactory()
    for arg in wikipedia.handleArgs():
        if arg == '-autotitle':
            autoTitle = True
        elif arg == '-autotext':
            autoText = True
        elif arg.startswith('-except:'):
            exceptions.append(arg[8:])
        elif arg.startswith('-start'):
            firstPageTitle = arg[7:]
            if not firstPageTitle:
                firstPageTitle = wikipedia.input(
                    u'At which page do you want to start?')
            firstPageTitle = wikipedia.Page(
                fasite, firstPageTitle).title(withNamespace=False)
            gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, 0,
                                                       includeredirects=True)
        elif arg.startswith('-template:'):
            template = arg[10:]
        elif arg.startswith('-namespace:'):
            namespaces.append(int(arg[11:]))
        elif arg.startswith('-summary:'):
            wikipedia.setAction(arg[9:])
            summary_commandline = True
        else:
            generator = genFactory.handleArg(arg)
            if generator:
                gen = generator
    if not gen:
        wikipedia.stopme()
        sys.exit()
    if namespaces != []:
        gen = pagegenerators.PreloadingGenerator(gen, pageNumber=60)
        preloadingGen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
    else:
        preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber=60)
    _cache, last_timestamp = get_cache()
    add_text(preloadingGen)
    now = str(datetime.now())
    todaynum = (int(now.split('-')[2].split(' ')[0])
                + int(now.split('-')[1]) * 30
                + (int(now.split('-')[0]) - 2000) * 365)
    if last_timestamp + 3 < todaynum:
        put_cache(_cache, todaynum)
    else:
        put_cache({}, 0)
def main():
    gaia = wikipedia.getSite(code=u'en', fam=u'gaia')
    plot = wikipedia.getSite(code=u'en', fam=u'plotwiki')
    wikipedia.setAction(wikipedia.translate(gaia, msg))
    wikipedia.setAction(wikipedia.translate(plot, msg))
    final = u'<noinclude><!-- Do not edit this page, this page is automatically created by a Bot. -->\n'
    final += u'==Most Recent Events==</noinclude>\n'
    nonrecent = u'<noinclude>==Older Events==\n'
    end = u'\n\'\'View everything here on the [[Plotwiki:|plotwiki...]]\'\'</noinclude>'
    moreYears = True
    year = 04
    events = []
    temp = []
    while moreYears:
        y = str(year)
        page = wikipedia.Page(plot, u'Template:Pnav%s' % y.zfill(2))
        try:
            text = page.get()
            r = sre.compile(u'^.*<span style=".*normal.*">(.*)</span>.*$',
                            sre.UNICODE | sre.MULTILINE | sre.DOTALL)
            text = sre.sub(r, u'\\1', text)
            r = sre.compile(u'\s+\|\s+', sre.UNICODE | sre.MULTILINE | sre.DOTALL)
            pages = sre.split(r, text)
            r = sre.compile(u'\[\[([^|]*)(\|.*)?\]\]', sre.UNICODE)
            for p in pages:
                temp.append(sre.sub(r, u'\\1', p))
            year += 1
        except wikipedia.NoPage:
            moreYears = False
    for e in temp:
        if not e in events:
            events.append(e)
    events = reversed(list(events))
    x = 1
    for e in events:
        final += u'* [[Plotwiki:%s|]]\n' % e
        x += 1
        if x == 6:
            final += nonrecent
    if x <= 6:
        final += end
    final += end
    page = wikipedia.Page(gaia, u'Plotwiki Current Events')
    page.put(final)
def createcat(cat, txt, action):
    wikipedia.setAction(action)
    p = wikipedia.Page(site, "Category:" + cat)
    ptxt = "Creating category " + cat
    print Fore.GREEN + "-" * len(ptxt)
    print ptxt
    print "-" * len(ptxt), Fore.WHITE
    sleep(30)
    p.put(txt)
    return
def main():
    genFactory = pagegenerators.GeneratorFactory()
    PageTitles = []
    xmlFilename = None
    always = False
    ignorepdf = False
    limit = None
    namespaces = []
    generator = None
    for arg in pywikibot.handleArgs():
        if arg.startswith('-namespace:'):
            try:
                namespaces.append(int(arg[11:]))
            except ValueError:
                namespaces.append(arg[11:])
        elif arg.startswith('-summary:'):
            pywikibot.setAction(arg[9:])
        elif arg == '-always':
            always = True
        elif arg == '-ignorepdf':
            ignorepdf = True
        elif arg.startswith('-limit:'):
            limit = int(arg[7:])
        elif arg.startswith('-xmlstart'):
            if len(arg) == 9:
                xmlStart = pywikibot.input(
                    u'Please enter the dumped article to start with:')
            else:
                xmlStart = arg[10:]
        elif arg.startswith('-xml'):
            if len(arg) == 4:
                xmlFilename = pywikibot.input(
                    u'Please enter the XML dump\'s filename:')
            else:
                xmlFilename = arg[5:]
        else:
            genFactory.handleArg(arg)
    if xmlFilename:
        try:
            xmlStart
        except NameError:
            xmlStart = None
        generator = XmlDumpPageGenerator(xmlFilename, xmlStart, namespaces)
    if not generator:
        generator = genFactory.getCombinedGenerator()
    if not generator:
        # syntax error, show help text from the top of this file
        pywikibot.showHelp('reflinks')
        return
    generator = pagegenerators.PreloadingGenerator(generator, pageNumber=50)
    generator = pagegenerators.RedirectFilterPageGenerator(generator)
    bot = ReferencesRobot(generator, always, limit, ignorepdf)
    bot.run()
def put(self, title, contents):
    mysite = wikipedia.getSite()
    page = wikipedia.Page(mysite, title)
    # Show the title of the page we're working on.
    # Highlight the title in purple.
    wikipedia.output(u">>> \03{lightpurple}%s\03{default} <<<" % page.title())
    if self.summary:
        comment = self.summary
    else:
        comment = wikipedia.translate(mysite, self.msg)
    comment_top = comment + " - " + wikipedia.translate(mysite, self.msg_top)
    comment_bottom = comment + " - " + wikipedia.translate(mysite, self.msg_bottom)
    comment_force = comment + " *** " + wikipedia.translate(mysite, self.msg_force) + " ***"
    # Remove leading newlines (they cause trouble when creating redirects)
    contents = re.sub('^[\r\n]*', '', contents)
    if page.exists():
        if self.append == "Top":
            wikipedia.output(u"Page %s already exists, appending on top!" % title)
            contents = contents + page.get()
            comment = comment_top
        elif self.append == "Bottom":
            wikipedia.output(u"Page %s already exists, appending on bottom!" % title)
            contents = page.get() + contents
            comment = comment_bottom
        elif self.force:
            wikipedia.output(u"Page %s already exists, ***overwriting!" % title)
            comment = comment_force
        else:
            wikipedia.output(u"Page %s already exists, not adding!" % title)
            return
    else:
        if self.autosummary:
            comment = ''
            wikipedia.setAction('')
    if self.dry:
        wikipedia.output("*** Dry mode ***\n"
                         + "\03{lightpurple}title\03{default}: " + title + "\n"
                         + "\03{lightpurple}contents\03{default}:\n" + contents + "\n"
                         "\03{lightpurple}comment\03{default}: " + comment + "\n")
        return
    try:
        page.put(contents, comment=comment, minorEdit=self.minor)
    except wikipedia.LockedPage:
        wikipedia.output(u"Page %s is locked; skipping." % title)
    except wikipedia.EditConflict:
        wikipedia.output(u'Skipping %s because of edit conflict' % title)
    except wikipedia.SpamfilterError, error:
        wikipedia.output(u'Cannot change %s because of spam blacklist entry %s'
                         % (title, error.url))
def run(self):
    # TODO: make all generators return a redirect type indicator,
    # thus making them usable with 'both'
    if self.action == 'double':
        # get summary text
        wikipedia.setAction(
            wikipedia.translate(wikipedia.getSite(), msg_double))
        self.fix_double_redirects()
    elif self.action == 'broken':
        self.delete_broken_redirects()
    elif self.action == 'both':
        self.fix_double_or_delete_broken_redirects()
def run(self):
    # Set the edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), self.msg))
    for page in self.generator:
        self.treat(page)
    overviewPage = wikipedia.Page(wikipedia.getSite(), u"VEIDs")
    text = "<!-- Note: automatically generated by robot-generate_openvz_velist.py. -->\n"
    text += "[VEID Naming Conventions]\n\n"
    text += "=== Legacy IDs ===\n"
    keys = self.veidlist.keys()
    keys.sort()
    lastid = ""
    for id in keys:
        pagename = self.veidlist[id]
        id = str(id)
        companyid = id[0:2]
        if (not lastid.startswith(companyid)) and len(id) > 4:
            text += "=== " + companyid + " - " + self.companies[int(companyid)] + " ===\n"
        text += "* [[" + pagename + "|'''" + id + "''']]''':''' [[" + pagename + "]]\n"
        if len(id) > 4:
            lastid = id
    text += "[[Category:VE]]"
    # only save if something was changed
    if text == overviewPage.get():
        return
    # Show the title of the page we're working on.
    # Highlight the title in purple.
    wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                     % overviewPage.title())
    # show what was changed
    wikipedia.showDiff(overviewPage.get(), text)
    choice = 'y'
    if self.debug:
        choice = wikipedia.inputChoice(
            u'Do you want to accept these changes?',
            ['Yes', 'No'], ['y', 'N'], 'N')
    if choice == 'y':
        try:
            # Save the page
            overviewPage.put(text)
        except wikipedia.LockedPage:
            wikipedia.output(u"Page %s is locked; skipping."
                             % overviewPage.aslink())
        except wikipedia.EditConflict:
            wikipedia.output(u'Skipping %s because of edit conflict'
                             % overviewPage.title())
        except wikipedia.SpamfilterError, error:
            wikipedia.output(u'Cannot change %s because of spam blacklist entry %s'
                             % (overviewPage.title(), error.url))
def run(self):
    self.count = {"target": [], "done": []}
    pywikibot.setAction(self.summary)
    for page in self.generator:
        self.treat(page)
    if self.outputwiki:
        for page in sorted(set(self.count["target"]) - set(self.count["done"])):
            print (u"*%s" % page).encode('utf_8')
    print "Done: %.01f%% (%d/%d)" % (
        100 * len(self.count["done"]) / float(len(self.count["target"])),
        len(self.count["done"]),
        len(self.count["target"]),
    )
def main():
    site = wikipedia.getSite()
    roadnumber = 419
    loopsat = True
    for arg in wikipedia.handleArgs():
        if arg.startswith('-road'):
            if len(arg) == 5:
                roadnumber = int(wikipedia.input(
                    'What road do you want to start at?'))
            elif len(arg) > 5:
                roadnumber = int(arg[6:])
    while loopsat == True:
        if roadnumber >= 1000:
            return
        listpage = wikipedia.Page(site, "List of highways numbered %s" % str(roadnumber))
        if listpage.exists() == True:
            wikipedia.setAction("Robot: Making redirects for Wikiproject U.S. Roads")
            wikipedia.output(">List of highways numbered %s exists" % str(roadnumber))
            rd = "#REDIRECT [[List of highways numbered %s]]" % str(roadnumber)
            p1 = wikipedia.Page(site, "Route %s" % str(roadnumber))
            p2 = wikipedia.Page(site, "Highway %s" % str(roadnumber))
            p3 = wikipedia.Page(site, "State Route %s" % str(roadnumber))
            p4 = wikipedia.Page(site, "State Highway %s" % str(roadnumber))
            p5 = wikipedia.Page(site, "State Road %s" % str(roadnumber))
            p6 = wikipedia.Page(site, "SR_%s" % str(roadnumber))
            p7 = wikipedia.Page(site, "SH_%s" % str(roadnumber))
            p8 = wikipedia.Page(site, "SR-%s" % str(roadnumber))
            p9 = wikipedia.Page(site, "SH-%s" % str(roadnumber))
            p10 = wikipedia.Page(site, "Federal Highway %s" % str(roadnumber))
            p11 = wikipedia.Page(site, "National Highway %s" % str(roadnumber))
            p12 = wikipedia.Page(site, "Federal Road %s" % str(roadnumber))
            p13 = wikipedia.Page(site, "National Road %s" % str(roadnumber))
            p14 = wikipedia.Page(site, "Federal Route %s" % str(roadnumber))
            p15 = wikipedia.Page(site, "National Route %s" % str(roadnumber))
            p16 = wikipedia.Page(site, "SR%s" % str(roadnumber))
            p17 = wikipedia.Page(site, "SH%s" % str(roadnumber))
            pagelist = [p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12,
                        p13, p14, p15, p16, p17]
            for i in pagelist:
                if i.exists() == False:
                    wikipedia.output(">>Creating %s" % i.title())
                    i.put(rd)
                else:
                    wikipedia.output(">>%s exists" % i.title())
        else:
            wikipedia.output(">List of highways numbered %s does not exist" % str(roadnumber))
        # End Of Loop
        roadnumber = roadnumber + 1
    wikipedia.output("Done!")
def treat(self, page):
    '''
    Loads a page, converts all HTML tables in its text to wiki syntax,
    and saves the converted text. Returns True if the converted table
    was successfully saved, otherwise returns False.
    '''
    pywikibot.output(u'\n>>> %s <<<' % page.title())
    site = page.site()
    try:
        text = page.get()
    except pywikibot.NoPage:
        pywikibot.error(u"couldn't find %s" % page.title())
        return False
    except pywikibot.IsRedirectPage:
        pywikibot.output(u'Skipping redirect %s' % page.title())
        return False
    newText, convertedTables, warningSum = self.convertAllHTMLTables(text)
    # Check if there are any marked tags left
    markedTableTagR = re.compile("<##table##|</##table##>", re.IGNORECASE)
    if markedTableTagR.search(newText):
        pywikibot.error(u'not all marked table start or end tags processed!')
        return
    if convertedTables == 0:
        pywikibot.output(u"No changes were necessary.")
    else:
        if config.table2wikiAskOnlyWarnings and warningSum == 0:
            doUpload = True
        else:
            if config.table2wikiSkipWarnings:
                doUpload = True
            else:
                print ("There were %i replacement(s) that might lead to bad "
                       "output." % warningSum)
                doUpload = (pywikibot.input(
                    u'Do you want to change the page anyway? [y|N]') == "y")
        if doUpload:
            # get edit summary message
            if warningSum == 0:
                pywikibot.setAction(
                    i18n.twtranslate(site.lang, 'table2wiki-no-warning'))
            else:
                pywikibot.setAction(
                    i18n.twntranslate(site.lang, 'table2wiki-warnings',
                                      {'count': warningSum}))
            page.put_async(newText)
def run(self):
    self.count = {"target": [], "done": []}
    pywikibot.setAction(self.summary)
    musecat = catlib.Category(pywikibot.getSite(), u'Category:博物館')
    ignore_list = [
        catlib.Category(pywikibot.getSite(), u'Category:登録博物館'),
        catlib.Category(pywikibot.getSite(), u'Category:博物館相当施設'),
        catlib.Category(pywikibot.getSite(), u'Category:全国博物館園職員録'),
        catlib.Category(pywikibot.getSite(), u'Category:全国博物館総覧'),
    ]
    catlist = musecat.subcategoriesList()
    catlist = set(catlist) - set(ignore_list)
    for page in self.generator:
        self.treat(page, catlist)
def run(self):
    for page in self.nets_generator:
        self.registerIpNet(page)
    for page in self.hosts_generator:
        self.registerIpHost(page)
    # Set the edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), self.msg))
    for net in self.nets:
        self.editIpNet(self.nets[net])
def run(self):
    # Set the edit summary message
    pywikibot.setAction(pywikibot.translate(pywikibot.getSite(), self.msg))
    linkingPage = pywikibot.Page(pywikibot.getSite(), self.index)
    self.prefix = linkingPage.titleWithoutNamespace()
    if self.prefix[0:6] == 'Liber:':
        self.prefix = self.prefix[6:]
    pywikibot.output(u"Using prefix %s" % self.prefix)
    gen = self.PagesGenerator()
    site = pywikibot.getSite()
    self.username = config.usernames[site.family.name][site.lang]
    for pageno in gen:
        pywikibot.output("Processing page %d" % pageno)
        self.treat(pageno)
def run(self):
    # Set the edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), self.msg))
    for page in self.generator:
        self.treat(page)
    overviewPage = wikipedia.Page(wikipedia.getSite(), u"Unmanaged instances")
    text = "<!-- Note: automatically generated by generate_unmanaged.py. -->\n{| border=1\n|-\n"
    keys = self.veidlist.keys()
    keys.sort()
    lastid = ""
    for pagename in keys:
        eth = self.veidlist[pagename]
        text = text + "|-\n| [[" + pagename + "]] \n| " + eth + "\n"
    text = text + "|}\n"
    # only save if something was changed
    if text == overviewPage.get():
        return
    # Show the title of the page we're working on.
    # Highlight the title in purple.
    wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                     % overviewPage.title())
    # show what was changed
    wikipedia.showDiff(overviewPage.get(), text)
    choice = 'y'
    if self.debug:
        choice = wikipedia.inputChoice(
            u'Do you want to accept these changes?',
            ['Yes', 'No'], ['y', 'N'], 'N')
    if choice == 'y':
        try:
            # Save the page
            overviewPage.put(text)
        except wikipedia.LockedPage:
            wikipedia.output(u"Page %s is locked; skipping."
                             % overviewPage.aslink())
        except wikipedia.EditConflict:
            wikipedia.output(u'Skipping %s because of edit conflict'
                             % overviewPage.title())
        except wikipedia.SpamfilterError, error:
            wikipedia.output(u'Cannot change %s because of spam blacklist entry %s'
                             % (overviewPage.title(), error.url))
def main():
    site = wikipedia.getSite()
    cat = catlib.Category(site, 'Category:Films by year')
    subcats = cat.subcategories(recurse=True)
    cats = []
    fulllist = []
    for cat in subcats:
        cats.append(delink(cat))
    for cat in cats:
        list = getartincat(cat)
        for page in list:
            fulllist.append(page.toggleTalkPage())
    tag = "{{Film}}"
    tag2 = "{{Film|nested=yes}}"
    pages = []
    wikipedia.setAction('Tagging for [[WP:FILM]] %s' % tag)
    for page in fulllist:
        dopage(page)
def main():
    # Set up the wiki families involved
    anime = wikipedia.getSite(code=u'en', fam=u'anime')
    wikipedia.setAction(wikipedia.translate(anime, msg))
    siteList = []
    # Get Project Wiki Listing
    wikiaIds = []
    page = wikipedia.Page(anime, u'Bots/Wiki', None, None, 4)  # 4 = Project namespace
    try:
        text = page.get()
        r = sre.compile(u'^.*<!-- \|\|START\|\| -->\n?', sre.UNICODE | sre.DOTALL)
        text = sre.sub(r, u'', text)
        r = sre.compile(u'\n?<!-- \|\|END\|\| -->.*$', sre.UNICODE | sre.DOTALL)
        text = sre.sub(r, u'', text)
        r = sre.compile(u'\n', sre.UNICODE | sre.MULTILINE | sre.DOTALL)
        wikilist = sre.split(r, text)
        for wiki in wikilist:
            if wiki != u'':
                wikiaIds.append(wiki)
    except wikipedia.NoPage:
        moreYears = False
    for wiki in wikiaIds:
        siteList.append(wikipedia.getSite(code=u'en', fam=wiki))
    commonstart = u'@import "http://en.anime.wikia.com/index.php?title=MediaWiki:Anime-Common.css&action=raw&ctype=text/css";'
    monobookstart = u'@import "http://en.anime.wikia.com/index.php?title=MediaWiki:Anime-Monobook.css&action=raw&ctype=text/css";'
    for site in siteList:
        common = wikipedia.Page(site, u'Common.css', None, None, 8)  # 8 = MediaWiki namespace
        monobook = wikipedia.Page(site, u'Monobook.css', None, None, 8)  # 8 = MediaWiki namespace
        siteSource = u''
        try:
            siteSource = sitePage.get()
        except wikipedia.NoPage:
            wikipedia.output(u'Site %s has no %s template, creating it'
                             % (site, template))
        if siteSource != templateSource:
            wikipedia.output(u'Site \'%s\' template status: Needs Updating' % site)
            wikipedia.output(u'Updating template on %s' % site)
            sitePage.put(templateSource)
        else:
            wikipedia.output(u'Site \'%s\' template status: Ok' % site)
def __init__(self, pages):
    self.sourceWiki = wikipedia.getSite(code=u'en', fam=u'animanga')
    wikipedia.setAction(wikipedia.translate(self.sourceWiki, self.summaries))
    self.getQue()
    syncTemplate = wikipedia.Page(self.sourceWiki, u'Sync', None, 10)  # 10 = Template namespace
    pageList = []
    if len(pages):
        refs = syncTemplate.getReferences()
        for pageTitle in pages:
            otherPage = wikipedia.Page(self.sourceWiki, pageTitle)
            for syncPage in refs:
                if (syncPage.namespace() == otherPage.namespace()
                        and syncPage.titleWithoutNamespace() == otherPage.titleWithoutNamespace()):
                    pageList.append(syncPage)
                    break
    else:
        for syncPage in syncTemplate.getReferences():
            pageList.append(syncPage)
    self.generator = iter(pageList)
def main():
    # this temporary array is used to read the page title.
    pageTitle = []
    gen = None
    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'windows_chars')
        if arg:
            if arg.startswith('-file'):
                if len(arg) == 5:
                    filename = wikipedia.input(
                        u'please enter the list\'s filename: ')
                else:
                    filename = arg[6:]
                gen = pagegenerators.TextfilePageGenerator(filename)
            elif arg.startswith('-sql'):
                if len(arg) == 4:
                    sqlfilename = wikipedia.input(
                        u'please enter the SQL dump\'s filename: ')
                else:
                    sqlfilename = arg[5:]
                gen = SqlWindows1252PageGenerator(sqlfilename)
            else:
                pageTitle.append(arg)
    # if a single page is given as a command line argument,
    # reconnect the title's parts with spaces
    if pageTitle != []:
        page = wikipedia.Page(wikipedia.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg))
    if not gen:
        wikipedia.showHelp('windows_chars')
    elif wikipedia.getSite().encoding() == "utf-8":
        print "There is no need to run this robot on UTF-8 wikis."
    else:
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = WindowsCharsBot(preloadingGen)
        bot.run()
def loadJobQueue(self, page, queue_security, reset=True):
    """Check if the data queue security is ok to execute the jobs,
    if so read the jobs and reset the queue.

    @param page: Wiki page containing job queue.
    @type page: page
    @param queue_security: This string must match the last edit comment,
        or else nothing is done.
    @type queue_security: string

    Returns a list of jobs. This list may be empty.
    """
    try:
        actual = page.getVersionHistory(revCount=1)[0]
    except:
        pass
    secure = False
    for item in queue_security[0]:
        secure = secure or (actual[2] == item)
    secure = secure and (actual[3] == queue_security[1])
    if not secure:
        return []
    data = self._REGEX_eol.split(page.get())
    if reset:
        pywikibot.output(u'\03{lightblue}Job queue reset...\03{default}')
        pywikibot.setAction(u'reset job queue')
        page.put(u'', minorEdit=True)
    queue = []
    for line in data:
        queue.append(line[1:].strip())
    return queue
def run(self):
    comment = wikipedia.translate(self.site, msg)
    wikipedia.setAction(comment)
    for page in self.generator:
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % page.title())
        try:
            text = page.get()
        except wikipedia.NoPage:
            wikipedia.output(u"Page %s does not exist?!" % page.aslink())
            continue
        except wikipedia.IsRedirectPage:
            wikipedia.output(u"Page %s is a redirect; skipping." % page.aslink())
            continue
        except wikipedia.LockedPage:
            wikipedia.output(u"Page %s is locked?!" % page.aslink())
            continue
        if self.lacksReferences(text):
            newText = self.addReferences(text)
            self.save(page, newText)
def __init__(self):
    self.wiki = self.coreWiki = wikipedia.getSite(code=u'en', fam=u'naruto')
    wikipedia.setAction(wikipedia.translate(self.wiki, self.getSummaries()))
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()
    gen = None
    PageTitles = []
    for arg in wikipedia.handleArgs():
        if arg.startswith('-page'):
            if len(arg) == 5:
                PageTitles.append(wikipedia.input(
                    u'\03{lightblue}Which page do you want to change?\03{default}'))
            elif len(arg) > 6:
                PageTitles.append(arg[6:])
        else:
            generator = genFactory.handleArg(arg)
            if generator:
                gen = generator
    if not gen and PageTitles:
        pages = [wikipedia.Page(self.wiki, PageTitle) for PageTitle in PageTitles]
        gen = iter(pages)
    self.generator = gen