コード例 #1
0
def main():
    """Filter the command line through the framework and run IdenticalRobot."""
    handled = [wikipedia.argHandler(raw, 'are-identical') for raw in sys.argv[1:]]
    script_args = [a for a in handled if a]
    generator = TwoPageGenerator(*script_args)
    IdenticalRobot(generator).run()
コード例 #2
0
ファイル: are-identical.py プロジェクト: MrTweek/lanigiro
def main():
    """Entry point: collect script-specific arguments and start the bot."""
    collected = []
    for raw in sys.argv[1:]:
        # argHandler consumes global options and returns the rest unchanged
        leftover = wikipedia.argHandler(raw, 'are-identical')
        if leftover:
            collected.append(leftover)
    bot = IdenticalRobot(TwoPageGenerator(*collected))
    bot.run()
コード例 #3
0
ファイル: getimages.py プロジェクト: dysklyver/pywikipediabot
def getfn():
    """Return the filenames given on the command line.

    Falls back to prompting the user interactively when none were supplied.
    """
    handled = [wikipedia.argHandler(raw, 'getimages') for raw in sys.argv[1:]]
    filenames = [f for f in handled if f]

    if not filenames:
        filenames.append(raw_input("Please enter a filename: "))

    return filenames
コード例 #4
0
def main():
    # this temporary array is used to read the page title.
    pageTitle = []
    gen = None

    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'windows_chars')
        if arg:
            if arg.startswith('-file'):
                if len(arg) == 5:
                    filename = wikipedia.input(
                        u'please enter the list\'s filename: ')
                else:
                    filename = arg[6:]
                gen = pagegenerators.TextfilePageGenerator(filename)
            elif arg.startswith('-sql'):
                if len(arg) == 4:
                    sqlfilename = wikipedia.input(
                        u'please enter the SQL dump\'s filename: ')
                else:
                    sqlfilename = arg[5:]
                gen = SqlWindows1252PageGenerator(sqlfilename)
            else:
                pageTitle.append(arg)

    # if a single page is given as a command line argument,
    # reconnect the title's parts with spaces
    if pageTitle != []:
        page = wikipedia.Page(wikipedia.getSite(), ' '.join(pageTitle))
        gen = iter([page])

    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg))

    if not gen:
        wikipedia.showHelp('windows_chars')
    elif wikipedia.getSite().encoding() == "utf-8":
        print "There is no need to run this robot on UTF-8 wikis."
    else:
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = WindowsCharsBot(preloadingGen)
        bot.run()
コード例 #5
0
def main():
    # this temporary array is used to read the page title.
    pageTitle = []
    gen = None

    for arg in sys.argv[1:]:
        arg = wikipedia.argHandler(arg, 'windows_chars')
        if arg:
            if arg.startswith('-file'):
                if len(arg) == 5:
                    filename = wikipedia.input(u'please enter the list\'s filename: ')
                else:
                    filename = arg[6:]
                gen = pagegenerators.TextfilePageGenerator(filename)
            elif arg.startswith('-sql'):
                if len(arg) == 4:
                    sqlfilename = wikipedia.input(u'please enter the SQL dump\'s filename: ')
                else:
                    sqlfilename = arg[5:]
                gen = SqlWindows1252PageGenerator(sqlfilename)
            else:
                pageTitle.append(arg)

    # if a single page is given as a command line argument,
    # reconnect the title's parts with spaces
    if pageTitle != []:
        page = wikipedia.Page(wikipedia.getSite(), ' '.join(pageTitle))
        gen = iter([page])

    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg))

    if not gen:
        wikipedia.showHelp('windows_chars')
    elif wikipedia.getSite().encoding() == "utf-8":
        print "There is no need to run this robot on UTF-8 wikis."
    else:
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = WindowsCharsBot(preloadingGen)
        bot.run()
コード例 #6
0
ファイル: check_extern.py プロジェクト: pyropeter/PyroBot-1G
    if error in errname:
        return errname[error]
    elif (error > 300) and (error < 400):
        return "Unknown Redirection Response"
    else:
        return "Unknown Error"


# Default options for the external-link checker.
start = "!"        # page title to start from when scanning the whole wiki
log = True         # keep a log unless -nolog is given
todo = []          # pages explicitly named on the command line
do_all = False     # set by -start:, meaning "walk every page from `start`"

for arg in sys.argv[1:]:
    # NOTE(review): `url` is rebound to the *first* argument on every
    # iteration and is never read in this span — looks like dead code;
    # confirm against the rest of the script before removing.
    url = sys.argv[1]
    arg = wikipedia.argHandler(arg, "check_extern")
    if arg:
        if arg.startswith("-start:"):
            start = arg[7:]
            do_all = True
        elif arg == "-nolog":
            log = False
        else:
            mysite = wikipedia.getSite()
            todo.append(wikipedia.Page(mysite, arg))

# Make sure we have the final site
mysite = wikipedia.getSite()

if todo == []:
    # No pages have been given; if also no start is given, we start at
コード例 #7
0
ファイル: upload.py プロジェクト: karstenw/Library
        L.append('Content-Type: %s' % get_content_type(filename))
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body

def get_content_type(filename):
    """Guess the MIME type for *filename*, defaulting to a generic binary type."""
    import mimetypes
    guessed = mimetypes.guess_type(filename)[0]
    return guessed if guessed else 'application/octet-stream'


for arg in sys.argv[1:]:
    if wikipedia.argHandler(arg):
        pass
    else:
        print "Unknown argument: ",arg
        sys.exit(1)
        
if not wikipedia.special.has_key(wikipedia.mylang):
    print "Please add the translation for the Special: namespace in"
    print "Your home wikipedia to the wikipedia.py module"
    import sys
    sys.exit(1)

if not wikipedia.cookies:
    print "You must be logged in to upload images"
    import sys
    sys.exit(1)
コード例 #8
0
ファイル: brackethttp.py プロジェクト: MrTweek/lanigiro
import re, sys
import wikipedia

# Per-language edit summary used when a broken URL is repaired.
myComment = {'ar':u'بوت: URL تم إصلاحها',
             'en':u'Bot: URL fixed',
             'fa':u'ربات: URL اصلاح شد',
             'he':u'בוט: תוקנה כתובת URL',
             'pt':u'Bot: URL corrigido',
             'zh':u'機器人: 網址已修復',
             }

if __name__ == "__main__":
    try:
        for arg in sys.argv[1:]:
            if wikipedia.argHandler(arg, 'brackethttp'):
                pass
            else:
                pl = wikipedia.Page(wikipedia.getSite(), arg)
                text = pl.get()
        
                newText = re.sub("(http:\/\/([^ ]*[^\] ]))\)", "[\\1 \\2])", text)

                if newText != text:
                    wikipedia.showDiff(text, newText)
                    status, reason, data = pl.put(newText, wikipedia.translate(wikipedia.mylang,myComment))
                    print status, reason
                else:
                    print "No bad link found"
    except:
        wikipedia.stopme()
コード例 #9
0
ファイル: sqldump.py プロジェクト: MrTweek/lanigiro
            yield entry
    elif action == 'unmountedcats':
        for entry in sqldump.query_unmountedcats():
            yield entry
    elif action == 'baddisambiguation':
        for entry in sqldump.entries():
            if entry.namespace == 0 and entry.title.endswith(')') and entry.text.startswith("''") and not entry.text.startswith("'''"):
                yield entry

if __name__=="__main__":
    # This script only reads a local SQL dump, so release the throttle
    # right away: the wiki is never contacted.
    wikipedia.stopme()
    import sys
    action = None
    filename = None
    for raw in sys.argv[1:]:
        arg = wikipedia.argHandler(raw, 'sqldump')
        if not arg:
            continue
        if arg.startswith('-sql'):
            # bare "-sql" -> ask for the dump file, "-sql:name" -> use name
            if len(arg) == 4:
                filename = wikipedia.input(u'Please enter the SQL dump\'s filename: ')
            else:
                filename = arg[5:]
        else:
            action = arg
    if filename and action:
        sqldump = SQLdump(filename, wikipedia.myencoding())

        for entry in query(sqldump, action):
            wikipedia.output(u'*[[%s]]' % entry.full_title())
    else:
        # missing arguments: print the module docstring as usage help
        wikipedia.output(__doc__, 'utf-8')
コード例 #10
0
    if error in errname:
        return errname[error]
    elif (error > 300) and (error < 400):
        return 'Unknown Redirection Response'
    else:
        return 'Unknown Error'


# Default settings for the external-link checker.
start = '!'        # first page title when scanning the whole wiki
log = True         # disabled by the -nolog option
todo = []          # pages explicitly named on the command line
do_all = False     # set by -start:, meaning "walk every page from `start`"

for arg in sys.argv[1:]:
    # NOTE(review): `url` is reassigned to the *first* CLI argument each
    # iteration and never read in this span — presumably dead code; confirm
    # against the rest of the file before removing.
    url = sys.argv[1]
    arg = wikipedia.argHandler(arg, 'check_extern')
    if arg:
        if arg.startswith('-start:'):
            start = arg[7:]
            do_all = True
        elif arg == '-nolog':
            log = False
        else:
            mysite = wikipedia.getSite()
            todo.append(wikipedia.Page(mysite, arg))

# Make sure we have the final site
mysite = wikipedia.getSite()

if todo == []:
    # No pages have been given; if also no start is given, we start at
コード例 #11
0
ファイル: brackethttp.py プロジェクト: vmorrisonwood/pywikia
import re, sys
import wikipedia

# Per-language edit summary used when a broken URL is repaired.
myComment = {
    'ar': u'بوت: URL تم إصلاحها',
    'en': u'Bot: URL fixed',
    'he': u'בוט: תוקנה כתובת URL',
    'pt': u'Bot: URL corrigido',
    'zh': u'機器人: 網址已修復',
}

if __name__ == "__main__":
    try:
        for arg in sys.argv[1:]:
            if wikipedia.argHandler(arg, 'brackethttp'):
                pass
            else:
                pl = wikipedia.Page(wikipedia.getSite(), arg)
                text = pl.get()

                newText = re.sub("(http:\/\/([^ ]*[^\] ]))\)", "[\\1 \\2])",
                                 text)

                if newText != text:
                    wikipedia.showDiff(text, newText)
                    status, reason, data = pl.put(
                        newText,
                        wikipedia.translate(wikipedia.mylang, myComment))
                    print status, reason
                else:
コード例 #12
0
if __name__ == "__main__":
    try:
        # if the -file argument is used, page titles are dumped in this array.
        # otherwise it will only contain one page.
        page_list = []
        # if -file is not used, this temporary array is used to read the page title.
        page_title = []
        from_lang = ""
        type = ""
        debug = False
        copy_images = False

        # read command line parameters
        for arg in sys.argv[1:]:
            arg = wikipedia.argHandler(arg, 'copy_table')
            if arg:
                if arg.startswith("-from"):
                    from_lang = arg[6:]
                elif arg.startswith("-type:"):
                    type = arg[6:]
                elif arg == "-debug":
                    debug = True
                elif arg == "-image":
                    copy_images = True
                elif arg.startswith('-file'):
                    if len(arg) == 5:
                        file = wikipedia.input(
                            u'Please enter the list\'s filename: ')
                    else:
                        file = arg[6:]
コード例 #13
0
ファイル: copy_table.py プロジェクト: MrTweek/lanigiro
if __name__=="__main__":
    try:
        # if the -file argument is used, page titles are dumped in this array.
        # otherwise it will only contain one page.
        page_list = []
        # if -file is not used, this temporary array is used to read the page title.
        page_title = []
        from_lang = ""
        type = ""
        debug = False
        copy_images = False

        # read command line parameters
        for arg in sys.argv[1:]:
            arg = wikipedia.argHandler(arg, 'copy_table')
            if arg:
                if arg.startswith("-from"):
                    from_lang = arg[6:]
                elif arg.startswith("-type:"):
                    type = arg[6:]
                elif arg == "-debug":
                    debug = True
                elif arg == "-image":
                    copy_images = True
                elif arg.startswith('-file'):
                    if len(arg) == 5:
                        file = wikipedia.input(u'Please enter the list\'s filename: ')
                    else:
                        file = arg[6:]
                    # open file and read page titles out of it