def main():
    args = []
    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'are-identical')
        if arg:
            args.append(arg)
    g = TwoPageGenerator(*args)
    r = IdenticalRobot(g)
    r.run()
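# Hedged invocation sketch: assuming this main() lives in are-identical.py and
# that the remaining command-line arguments are the titles of the two pages
# TwoPageGenerator yields for comparison, a run would look like:
#
#     python are-identical.py PageA PageB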
def getfn():
    fns = []
    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'getimages')
        if arg:
            fns.append(arg)
    if len(fns) == 0:
        fns.append(raw_input("Please enter a filename: "))
    return fns
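# Usage sketch: getfn() returns whatever filenames survive argHandler
# filtering, falling back to an interactive prompt when none were given:
#
#     fns = getfn()    # e.g. ['pages.txt'], or the value typed at the prompt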
def main():
    # this temporary list is used to read the page title.
    pageTitle = []
    gen = None
    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'windows_chars')
        if arg:
            if arg.startswith('-file'):
                if len(arg) == 5:
                    filename = wikipedia.input(
                        u'please enter the list\'s filename: ')
                else:
                    filename = arg[6:]
                gen = pagegenerators.TextfilePageGenerator(filename)
            elif arg.startswith('-sql'):
                if len(arg) == 4:
                    sqlfilename = wikipedia.input(
                        u'please enter the SQL dump\'s filename: ')
                else:
                    sqlfilename = arg[5:]
                gen = SqlWindows1252PageGenerator(sqlfilename)
            else:
                pageTitle.append(arg)
    # if a single page is given as a command line argument,
    # reconnect the title's parts with spaces
    if pageTitle != []:
        page = wikipedia.Page(wikipedia.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg))
    if not gen:
        wikipedia.showHelp('windows_chars')
    elif wikipedia.getSite().encoding() == "utf-8":
        print "There is no need to run this robot on UTF-8 wikis."
    else:
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = WindowsCharsBot(preloadingGen)
        bot.run()
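# Hedged invocation sketch (the flag spellings come from the argument parsing
# above; the script filename windows_chars.py is an assumption):
#
#     python windows_chars.py -file:pages.txt      # titles from a text file
#     python windows_chars.py -sql:dump.sql        # titles from an SQL dump
#     python windows_chars.py Some page title      # a single page title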
    if error in errname:
        return errname[error]
    elif (error > 300) and (error < 400):
        return "Unknown Redirection Response"
    else:
        return "Unknown Error"

start = "!"
log = True
todo = []
do_all = False
for arg in sys.argv[1:]:
    arg = wikipedia.argHandler(arg, "check_extern")
    if arg:
        if arg.startswith("-start:"):
            start = arg[7:]
            do_all = True
        elif arg == "-nolog":
            log = False
        else:
            mysite = wikipedia.getSite()
            todo.append(wikipedia.Page(mysite, arg))
# Make sure we have the final site
mysite = wikipedia.getSite()
if todo == []:
    # No pages have been given; if also no start is given, we start at
    L.append('Content-Type: %s' % get_content_type(filename))
    L.append('')
    L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body

def get_content_type(filename):
    import mimetypes
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

for arg in sys.argv[1:]:
    if wikipedia.argHandler(arg):
        pass
    else:
        print "Unknown argument:", arg
        sys.exit(1)

if not wikipedia.special.has_key(wikipedia.mylang):
    print "Please add the translation for the Special: namespace in"
    print "your home wikipedia to the wikipedia.py module"
    sys.exit(1)
if not wikipedia.cookies:
    print "You must be logged in to upload images"
    sys.exit(1)
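# Hedged usage sketch for the multipart encoder whose tail is shown above.
# The name encode_multipart_formdata and its (fields, files) signature are
# assumptions inferred from the visible body and from common variants of
# this helper:
#
#     fields = [('wpUploadDescription', 'A test image')]
#     files = [('wpUploadFile', 'Example.jpg',
#               open('Example.jpg', 'rb').read())]
#     content_type, body = encode_multipart_formdata(fields, files)
#     # content_type and body then go into an HTTP POST to Special:Upload.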
import re, sys
import wikipedia

myComment = {
    'ar': u'بوت: URL تم إصلاحها',
    'en': u'Bot: URL fixed',
    'fa': u'ربات: URL اصلاح شد',
    'he': u'בוט: תוקנה כתובת URL',
    'pt': u'Bot: URL corrigido',
    'zh': u'機器人: 網址已修復',
}

if __name__ == "__main__":
    try:
        for arg in sys.argv[1:]:
            if wikipedia.argHandler(arg, 'brackethttp'):
                pass
            else:
                pl = wikipedia.Page(wikipedia.getSite(), arg)
                text = pl.get()
                # turn a bare URL that closes a parenthesized remark into
                # MediaWiki external-link syntax before the closing bracket
                newText = re.sub(r"(http://([^ ]*[^\] ]))\)",
                                 r"[\1 \2])", text)
                if newText != text:
                    wikipedia.showDiff(text, newText)
                    status, reason, data = pl.put(
                        newText,
                        wikipedia.translate(wikipedia.mylang, myComment))
                    print status, reason
                else:
                    print "No bad link found"
    finally:
        wikipedia.stopme()
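# Worked example of the substitution above:
#   before: See the manual (http://example.org/doc)
#   after:  See the manual ([http://example.org/doc example.org/doc])
# Group 1 captures the full URL, group 2 the part after http://, which the
# replacement reuses as the visible link label.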
            yield entry
    elif action == 'unmountedcats':
        for entry in sqldump.query_unmountedcats():
            yield entry
    elif action == 'baddisambiguation':
        for entry in sqldump.entries():
            if entry.namespace == 0 and entry.title.endswith(')') \
               and entry.text.startswith("''") \
               and not entry.text.startswith("'''"):
                yield entry

if __name__ == "__main__":
    # No need to have me on the stack, as I'm not contacting the wiki
    wikipedia.stopme()
    import sys
    action = None
    filename = None
    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'sqldump')
        if arg:
            if arg.startswith('-sql'):
                if len(arg) == 4:
                    filename = wikipedia.input(
                        u'Please enter the SQL dump\'s filename: ')
                else:
                    filename = arg[5:]
            else:
                action = arg
    if not filename or not action:
        wikipedia.output(__doc__, 'utf-8')
    else:
        sqldump = SQLdump(filename, wikipedia.myencoding())
        for entry in query(sqldump, action):
            wikipedia.output(u'*[[%s]]' % entry.full_title())
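# Hedged invocation sketch (the action names are the ones dispatched in the
# query generator above; the filename sqldump.py is inferred from the
# argHandler tag):
#
#     python sqldump.py -sql:dump.sql unmountedcats
#     python sqldump.py -sql:dump.sql baddisambiguation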
if __name__ == "__main__":
    try:
        # if the -file argument is used, page titles are collected in this
        # list; otherwise it will only contain one page.
        page_list = []
        # if -file is not used, this temporary list is used to read the page title.
        page_title = []
        from_lang = ""
        type = ""
        debug = False
        copy_images = False
        # read command line parameters
        for arg in sys.argv[1:]:
            arg = wikipedia.argHandler(arg, 'copy_table')
            if arg:
                if arg.startswith("-from"):
                    from_lang = arg[6:]
                elif arg.startswith("-type:"):
                    type = arg[6:]
                elif arg == "-debug":
                    debug = True
                elif arg == "-image":
                    copy_images = True
                elif arg.startswith('-file'):
                    if len(arg) == 5:
                        file = wikipedia.input(
                            u'Please enter the list\'s filename: ')
                    else:
                        file = arg[6:]
                    # open file and read page titles out of it