Example 1
    def __init__(self, generator, dry, always):
        """
        Constructor. Parameters:
            * generator - The page generator that determines which pages
                          to work on.
            * dry       - If True, doesn't do any real changes, but only shows
                          what would have been changed.
            * always    - If True, don't prompt for each redirect page.
        """
        self.generator = generator
        self.dry = dry
        self.always = always
        self.lang = pywikibot.getSite().lang
        
        # Set the edit summary message
        self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
        self.templates = pywikibot.translate(pywikibot.getSite(), self.taxoboxTemplates)
        self.templateParameters = pywikibot.translate(pywikibot.getSite(), self.sciNameParameters)

        # Initialize the cache
        try:
            self.cache = pickle.load(file(self.cacheFilename, 'rb'))
        except:
            self.cache = {}
        if self.lang not in self.cache:
            self.cache[self.lang] = {}
Example 2
def updateWPlink(page, pageTemp):
    pageTemp = pageTemp.replace(
        "{{FULLPAGENAME}}",
        page.title())  # Needed to fix lazy editors' use of {{FULLPAGENAME}}
    pageTemp = pageTemp.replace(
        "{{PAGENAME}}", page.title())  # Needed to fix lazy editors' use of {{PAGENAME}}
    wpPage = pywikibot.Page(pywikibot.getSite(page.site.lang, "wikipedia"),
                            page.title())
    wpLink = ''

    if wpPage.exists():
        wpLink = u"[[wp:" + wpPage.title() + "]]"

    m = re.search(r"\[\[wp\:(?P<ln>.*?)\]\]", pageTemp)

    if m is not None:
        oldWpPage = pywikibot.Page(
            pywikibot.getSite(page.site.lang, "wikipedia"), m.group('ln'))
        if not oldWpPage.exists():
            pageTemp = pageTemp.replace(m.group(), wpLink)

    else:
        pageTemp += '\n' + wpLink

    return pageTemp
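A minimal usage sketch for the helper above, assuming the old pywikibot compat framework with a configured user-config.py; the page title and edit summary are purely illustrative:

import pywikibot

site = pywikibot.getSite()                    # default site from user-config.py
page = pywikibot.Page(site, u'Some entry')    # hypothetical page title
text = page.get()                             # current wikitext
newtext = updateWPlink(page, text)            # insert or fix the [[wp:...]] link
if newtext != text:
    page.put(newtext, comment=u'Updating Wikipedia link')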
Example 3
def checkWait():
        newlist = ""  # blank variable for later
        site = pywikibot.getSite()
        pagename = localconfig.waitlist
        page = pywikibot.Page(site, pagename)
        waiters = page.get()
        waiters = waiters.replace("}}", "")
        waiters = waiters.replace("*{{User|", "")
        waiters = waiters.split("\n")
        for waiter in waiters:
                if waiter == "":continue  # Non-existant user
                if checkRegisterTime(waiter, 7, False):continue
                if checkBlocked(waiter):continue  # If user is blocked, skip putting them back on the list.
                if getEditCount(waiter) == True:  # If edited, send them to UAA
                        checkUser(waiter, False, False)
                        continue
                if waiter in newlist:continue  # If user already in the list, in case duplicates run over
                # Continue if none of the other checks have issues with the conditions for staying on the waitlist
                newlist = newlist + "\n*{{User|" + waiter + "}}"
                # print "\n*{{User|" + waiter + "}}"
        summary = localconfig.editsumwait
        site = pywikibot.getSite()
        pagename = localconfig.waitlist
        page = pywikibot.Page(site, pagename)
        pagetxt = page.get()
        newlist = newlist.replace("\n*{{User|}}", "")
        page.put(newlist, comment=summary)
Example 4
 def __init__(self, page, filename, summary, dry, always):
     self.page = pywikibot.Page(pywikibot.getSite(), page)
     self.filename = filename
     self.summary = summary
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction(self.summary)
Example 5
 def __iter__(self):
     """Yield page objects until the entire XML dump has been read."""
     from pywikibot import xmlreader
     mysite = pywikibot.getSite()
     dump = xmlreader.XmlDump(self.xmlfilename)
     # regular expression to find the original template.
     # {{vfd}} does the same thing as {{Vfd}}, so both will be found.
     # The old syntax, {{msg:vfd}}, will also be found.
     # TODO: check site.nocapitalize()
     templatePatterns = []
     for template in self.templates:
         templatePattern = template.titleWithoutNamespace()
         if not pywikibot.getSite().nocapitalize:
             templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
                                             templatePattern[0].lower(),
                                             templatePattern[1:])
         templatePattern = re.sub(' ', '[_ ]', templatePattern)
         templatePatterns.append(templatePattern)
     templateRegex = re.compile(
         r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}' %
         '|'.join(templatePatterns))
     for entry in dump.parse():
         if templateRegex.search(entry.text):
             page = pywikibot.Page(mysite, entry.title)
             yield page
def main():
    featured = False
    gen = None

    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg == '-featured':
            featured = True
        else:
            genFactory.handleArg(arg)

    mysite = pywikibot.getSite()
    if mysite.sitename() == 'wikipedia:nl':
        pywikibot.output(
            u'\03{lightred}There is consensus on the Dutch Wikipedia that bots should not be used to fix redirects.\03{default}')
        sys.exit()

    if featured:
        featuredList = i18n.translate(mysite, featured_articles)
        ref = pywikibot.Page(pywikibot.getSite(), featuredList)
        gen = pagegenerators.ReferringPageGenerator(ref)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, [0])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if gen:
        for page in pagegenerators.PreloadingGenerator(gen):
            workon(page)
    else:
        pywikibot.showHelp('fixing_redirects')
Example 7
 def __init__(self, pageToUnlink, namespaces, always):
     self.pageToUnlink = pageToUnlink
     gen = pagegenerators.ReferringPageGenerator(pageToUnlink)
     if namespaces != []:
         gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
     self.generator = pagegenerators.PreloadingGenerator(gen)
     linktrail = pywikibot.getSite().linktrail()
     # The regular expression which finds links. Results consist of four
     # groups:
     #
     # group title is the target page title, that is, everything
     # before | or ].
     #
     # group section is the page section.
     # It'll include the # to make life easier for us.
     #
     # group label is the alternative link title, that's everything
     # between | and ].
     #
     # group linktrail is the link trail, that's letters after ]] which are
     # part of the word.
     # note that the definition of 'letter' varies from language to language.
     self.linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?(\|(?P<label>[^\]]*))?\]\](?P<linktrail>%s)'
                             % linktrail)
     self.always = always
     self.done = False
     self.comment = i18n.twtranslate(pywikibot.getSite(), 'unlink-unlinking',
                                     self.pageToUnlink.title())
 def save_translation_from_bridge_language(self, infos):
     summary = "Dikan-teny avy amin'ny dikan-teny avy amin'i %(olang)s.wiktionary"%infos
     wikipage = self.output.wikipage(infos)
     try: 
         mg_Page = wikipedia.Page(wikipedia.getSite('mg','wiktionary'), infos['entry'])
     except UnicodeDecodeError: 
         mg_Page = wikipedia.Page(wikipedia.getSite('mg','wiktionary'), infos['entry'].decode('utf8'))
         
     try:
         if mg_Page.exists():
             pagecontent = mg_Page.get()
             if pagecontent.find('{{=%s=}}'%infos['lang'])!=-1:
                 if verbose: print "Efa misy ilay teny iditra."
                 self.output.db(infos)
                 return
             else:
                 wikipage += pagecontent
                 summary= u"+"+summary 
     except wikipedia.exceptions.IsRedirectPage:                
         infos['entry'] = mg_Page.getRedirectTarget().title()
         self.save_translation_from_bridge_language(infos)
         return
     
     except wikipedia.exceptions.InvalidTitle:
         if verbose: print "lohateny tsy mety ho lohatenim-pejy"
         return
     
     except Exception:
         return
     
 
     if verbose: 
         wikipedia.output("\n \03{red}%(entry)s\03{default} : %(lang)s "%infos)
         wikipedia.output("\03{white}%s\03{default}"%wikipage)
     mg_Page.put_async(wikipage, summary)
Example 9
 def __init__(self, page, filename, summary, dry, always):
     self.page = pywikibot.Page( pywikibot.getSite(), page )
     self.filename = filename
     self.summary = summary
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction( self.summary )
	def getTranslatedStringForUser(self):
		"""
		Gets the local namespace name for User pages. e.g. Bruker on no.

		Uses pywikibot.

		API method:
			https://no.wikipedia.org/w/api.php?action=query&meta=siteinfo
				 &siprop=namespaces&format=json
		"""
		try:
			logging.info("Fetching User Namespace Name")
			format_language = self.language
			if '_' in format_language:
				wikiSite = pywikibot.getSite(format_language.replace('_','-'))
			else:
				wikiSite = pywikibot.getSite(self.language)
			#print wikiSite
			r = pywikibot.data.api.Request(
				site=wikiSite, action="query", meta="siteinfo")
			r['siprop'] = u'namespaces'
			data = r.submit()
			if self.language == 'pt':
				localized_user = data['query']['namespaces']['2']['*']
				return localized_user.split('(')[0]
			else:
				return data['query']['namespaces']['2']['*']
		except pywikibot.exceptions.NoSuchSite, e:
			logging.error(e)		
Example 11
 def __iter__(self):
     """Yield page objects until the entire XML dump has been read."""
     from pywikibot import xmlreader
     mysite = pywikibot.getSite()
     dump = xmlreader.XmlDump(self.xmlfilename)
     # regular expression to find the original template.
     # {{vfd}} does the same thing as {{Vfd}}, so both will be found.
     # The old syntax, {{msg:vfd}}, will also be found.
     # TODO: check site.nocapitalize()
     templatePatterns = []
     for template in self.templates:
         templatePattern = template.titleWithoutNamespace()
         if not pywikibot.getSite().nocapitalize:
             templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
                                             templatePattern[0].lower(),
                                             templatePattern[1:])
         templatePattern = re.sub(' ', '[_ ]', templatePattern)
         templatePatterns.append(templatePattern)
     templateRegex = re.compile(
         r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
                                % '|'.join(templatePatterns))
     for entry in dump.parse():
         if templateRegex.search(entry.text):
             page = pywikibot.Page(mysite, entry.title)
             yield page
Example 12
def getFullHistory(pageTitle, revEnd='', revStart=''):
    """Get all revision from start to end using API

    We get the full history starting at revStart (newest rvid) and ending at
    revEnd (oldest rvid). If revStart == -1 we just start at the current revision.

    We return an XML text string with all the requested revisions.
    Note that the maximum we are allowed to get is 50 pages with content.
    """
    predata = {
        'action': 'query',
        'format': 'xml',
        'prop': 'revisions',
        'rvprop': 'ids|timestamp|user|comment|content',
        'rvendid': str(revEnd),
        'titles': pageTitle.encode("utf8")
    }

    # sometimes we don't know the number of the newest revision
    # it should be set to -1 and will not be considered
    if revStart != -1:
        predata['rvstartid'] = str(revStart)

    address = pywikibot.getSite().family.apipath(pywikibot.getSite().lang)
    return api.postForm(pywikibot.getSite(), address, predata=predata)
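A hedged usage sketch for the function above (the page title and revision IDs are placeholders; assumes the same module context, i.e. the compat framework and its api helper):

xml = getFullHistory(u'Some page',    # hypothetical title
                     revEnd=1000,     # oldest revision ID to fetch
                     revStart=-1)     # -1 means: start at the current revision
print xml[:200]                       # raw XML string returned by the API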
Example 13
def review(revisionID, comment='', getagain=False):
    """
    This function will review the given revision ID with the given comment.
    If getagain is set, it will get a new token (not necessary since it will
    try to get a new token automatically if the old one is broken)
    """
    # print('review will start id: %s - %s' % (revisionID, getagain))
    predata = {
        'action': 'review',
        'format': 'xml',
        'revid': str(revisionID),
        'token': pywikibot.getSite().getToken(getagain=getagain),
        'comment': comment,
    }

    address = pywikibot.getSite().family.apipath(pywikibot.getSite().lang)
    data = api.postForm(pywikibot.getSite(), address, predata)

    if data.find('review result=\"Success\"') == -1:
        if not data.find('Invalid token') == -1:
            if getagain:
                raise pywikibot.Error('Invalid Token')
            else:
                review(revisionID, comment=comment, getagain=True)
        else:
            raise pywikibot.Error('Review unsuccessful %s' % data)
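A hedged sketch of calling the function above (the revision ID is a placeholder; assumes the compat framework and reviewer rights on the wiki):

try:
    review(123456789, comment=u'Reviewed by bot')   # hypothetical revision ID
except pywikibot.Error as err:
    pywikibot.output(u'Review failed: %s' % err)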
Example 14
def main():
    startpage = 'Anarana iombonana amin\'ny teny malagasy'
    pages = pagegenerators.CategorizedPageGenerator(catlib.Category(pywikibot.getSite('mg','wiktionary'), startpage))
    for page in pages:
        pagename = page.title()
        try:
            t_p = page.get()
        except wikipedia.NoPage:
            print '  Tsy misy pejy.'  # "No page."
            t_p = ''
        except wikipedia.IsRedirectPage:
            print '  Pejy fihodinana.'  # "Redirect page."
            continue
        except wikipedia.Error:
            print '  Hadisoana.'  # "Error."
            continue
        f_pages = traite(pagename)  # returns a tuple [1s, 2s, 3s, 1pi, 1pp, 2pp, 3pp] of page titles
        c_pages = tupleur(pagename)  # returns a tuple [1s, 2s, 3s, 1pi, 1pp, 2pp, 3pp] of page contents
        cont = 0
        b = 0
        while cont <= 6:
            try:
                wikipedia.output((wikipedia.Page(wikipedia.getSite('mg','wiktionary'), f_pages[cont]).get()))
                b += 1
                print b
                cont += 6

            except wikipedia.NoPage:
                try:
                    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), f_pages[cont]).put(c_pages[cont])
                    cont = cont + 1
                except UnicodeDecodeError :
                    cont = cont + 1
                    continue
            if cont >= 6: break
Example 15
def undo(pagetitle, revisionOld, revisionNew, comment = '', getagain = False):
    """"
    This function will try to undo the page to the old revision ID
    If getagain is set, it will get a new token (not necessary since it will
    try to get a new token automatically if the old one is broken)
    """
    predata = {
            'action'        : 'edit',
            'format'        : 'xml',
            'title'         : pagetitle,
            'undo'          : str(revisionOld),
            'undoafter'     : str(revisionNew),
            'token'         : pywikibot.getSite().getToken(),
            'summary'       : comment,
    }
    address = pywikibot.getSite().family.apipath(pywikibot.getSite().lang)
    data = api.postForm(pywikibot.getSite(), address, predata=predata)
    dom = minidom.parseString(data.encode('utf8'))

    # print('undo data posted, page %s' % pagetitle)
    error = dom.getElementsByTagName('error')
    if len(error) != 0:
        if data.find('Invalid token') != -1:
            if getagain:
                raise pywikibot.Error('Invalid Token')
            else:
                undo(pagetitle, revisionOld, revisionNew, comment=comment,
                     getagain=True)
        else:
            raise pywikibot.Error('%s : %s' % (error[0].getAttribute('code'),
                                               error[0].getAttribute('info')))

    edit = dom.getElementsByTagName('edit')
    result = edit[0].getAttribute('result')
    newrevid = edit[0].getAttribute('newrevid')
    return result, newrevid
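A hedged sketch of how the helper above might be invoked (title and revision IDs are placeholders; assumes the compat framework and edit rights):

result, newrevid = undo(u'Some vandalised page',   # hypothetical title
                        revisionOld=123457,        # revision to undo
                        revisionNew=123450,        # undo back to this revision
                        comment=u'Undoing test edit')
if result == 'Success':
    pywikibot.output(u'Reverted; new revision is %s' % newrevid)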
Example 16
	def addCats(self):
		text = u"""
[[Categorie:Filme românești]]
[[Categorie:Filme în limba română]]
"""
		if self._year:
			text += u"[[Categorie:Filme din %d]]\n" % self._year
		if self._director:
			directors = self._director.split(",")
			for director in directors:
				cat = u"Categorie:Filme regizate de %s" % director.strip()
				cat = pywikibot.Category(pywikibot.getSite(), cat)
				if cat.exists():
					text += u"[[Categorie:Filme regizate de %s]]\n" % director.strip()
		for t in self._types:
			cat = u"Filme de %s" % t.lower()
			catp = None
			if cat in categories:
				catp = pywikibot.Category(pywikibot.getSite(), categories[cat])
			if not catp or not catp.exists():
				catp = pywikibot.Category(pywikibot.getSite(), cat)

			if catp.exists():
				for p in catp.templatesWithParams():
					if p[0].title() == "Format:Redirect categorie":
						break
				else:
					text += u"[[%s]]\n" % catp.title()
		self._text += text
Example 17
 def __init__(self, pageToUnlink, namespaces, always):
     self.pageToUnlink = pageToUnlink
     gen = pagegenerators.ReferringPageGenerator(pageToUnlink)
     if namespaces != []:
         gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
     self.generator = pagegenerators.PreloadingGenerator(gen)
     linktrail = pywikibot.getSite().linktrail()
     # The regular expression which finds links. Results consist of four
     # groups:
     #
     # group title is the target page title, that is, everything
     # before | or ].
     #
     # group section is the page section.
     # It'll include the # to make life easier for us.
     #
     # group label is the alternative link title, that's everything
     # between | and ].
     #
     # group linktrail is the link trail, that's letters after ]] which are
     # part of the word.
     # note that the definition of 'letter' varies from language to language.
     self.linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?(\|(?P<label>[^\]]*))?\]\](?P<linktrail>%s)'
                             % linktrail)
     self.always = always
     self.done = False
     self.comment = i18n.twtranslate(pywikibot.getSite(), 'unlink-unlinking',
                                     self.pageToUnlink.title())
Example 18
def MakeAppendix(mot):
    verb = mot.title()
    form = """{{subst:-e-mat-vo|%s}}
[[sokajy:Volapoky/Matoanteny|%s]]"""%(verb[:-2], verb[0])
    
    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), "Wiktionary:Raki-bolana volapoky/matoanteny/%s"%verb).put("#FIHODINANA [[Rakibolana:volapoky/matoanteny/%s]]"%verb)
    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), "Rakibolana:volapoky/matoanteny/%s"%verb).put(form,'Matoanteny %s' %verb)
Example 19
def main():
    #page generator
    gen = None
    # This temporary array is used to read the page title if one single
    # page to work on is specified by the arguments.
    pageTitle = []
    # Which namespaces should be processed?
    # default to [] which means all namespaces will be processed
    namespaces = []
    # Never ask before changing a page
    always = False
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg.startswith('-xml'):
            if len(arg) == 4:
                xmlFilename = i18n.input('pywikibot-enter-xml-filename')
            else:
                xmlFilename = arg[5:]
            gen = XmlDumpNoReferencesPageGenerator(xmlFilename)
        elif arg.startswith('-namespace:'):
            try:
                namespaces.append(int(arg[11:]))
            except ValueError:
                namespaces.append(arg[11:])
        elif arg == '-always':
            always = True
        else:
            if not genFactory.handleArg(arg):
                pageTitle.append(arg)

    if pageTitle:
        page = pywikibot.Page(pywikibot.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        site = pywikibot.getSite()
        try:
            cat = maintenance_category[site.family.name][site.lang]
        except KeyError:
            pass
        else:
            if not namespaces:
                namespaces = [0]
            cat = catlib.Category(site,
                                  "%s:%s" % (site.category_namespace(), cat))
            gen = pagegenerators.CategorizedPageGenerator(cat)
    if not gen:
        pywikibot.showHelp('noreferences')
    else:
        if namespaces:
            gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = NoReferencesBot(preloadingGen, always)
        bot.run()
Example 20
def main():
    #page generator
    gen = None
    # This temporary array is used to read the page title if one single
    # page to work on is specified by the arguments.
    pageTitle = []
    # Which namespaces should be processed?
    # default to [] which means all namespaces will be processed
    namespaces = []
    # Never ask before changing a page
    always = False
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg.startswith('-xml'):
            if len(arg) == 4:
                xmlFilename = i18n.input('pywikibot-enter-xml-filename')
            else:
                xmlFilename = arg[5:]
            gen = XmlDumpNoReferencesPageGenerator(xmlFilename)
        elif arg.startswith('-namespace:'):
            try:
                namespaces.append(int(arg[11:]))
            except ValueError:
                namespaces.append(arg[11:])
        elif arg == '-always':
            always = True
        else:
            if not genFactory.handleArg(arg):
                pageTitle.append(arg)

    if pageTitle:
        page = pywikibot.Page(pywikibot.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        site = pywikibot.getSite()
        try:
            cat = maintenance_category[site.family.name][site.lang]
        except KeyError:
            pass
        else:
            if not namespaces:
                namespaces = [0]
            cat = pywikibot.Category(site, "%s:%s" % (
                site.category_namespace(), cat))
            gen = pagegenerators.CategorizedPageGenerator(cat)
    if not gen:
        pywikibot.showHelp('noreferences')
    else:
        if namespaces:
            gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = NoReferencesBot(preloadingGen, always)
        bot.run()
Example 21
def main():
    index = None
    djvu = None
    pages = None
    ask = False
    overwrite = 'ask'

    # Parse command line arguments
    for arg in pywikibot.handleArgs():
        if arg.startswith("-ask"):
            ask = True
        elif arg.startswith("-overwrite:"):
            overwrite = arg[11:12]
            if overwrite != 'y' and overwrite != 'n':
                pywikibot.output(
                    u"Unknown argument %s; will ask before overwriting" % arg)
                overwrite = 'ask'
        elif arg.startswith("-djvu:"):
            djvu = arg[6:]
        elif arg.startswith("-index:"):
            index = arg[7:]
        elif arg.startswith("-pages:"):
            pages = arg[7:]
        else:
            pywikibot.output(u"Unknown argument %s" % arg)

    # Check the djvu file exists
    if djvu:
        os.stat(djvu)

        if not index:
            import os.path
            index = os.path.basename(djvu)

    if djvu and index:
        site = pywikibot.getSite()
        index_page = pywikibot.Page(site, index)

        if site.family.name != 'wikisource':
            raise pywikibot.PageNotFound(
                u"Found family '%s'; Wikisource required." % site.family.name)

        if not index_page.exists() and index_page.namespace() == 0:
            index_namespace = site.mediawiki_message(
                'Proofreadpage index namespace')

            index_page = pywikibot.Page(pywikibot.getSite(),
                                        u"%s:%s" % (index_namespace, index))
        if not index_page.exists():
            raise pywikibot.NoPage(u"Page '%s' does not exist" % index)
        pywikibot.output(u"uploading text from %s to %s"
                         % (djvu, index_page.title(asLink=True)))
        bot = DjVuTextBot(djvu, index, pages, ask, overwrite)
        if not bot.has_text():
            raise ValueError("No text layer in djvu file")
        bot.run()
    else:
        pywikibot.showHelp()
 def __init__(self, myscraper, testing=False):
     self.myscraper = myscraper
     self.testing = testing
     if testing:
         self.destination_site = pywikibot.getSite("test", "test")
     else:
         self.destination_site = pywikibot.getSite("commons", "commons")
     print self.destination_site
Example 23
 def __init__(self, page, filename, summary, overwrite):
     self.page = pywikibot.Page( pywikibot.getSite(), page )
     self.filename = filename
     self.summary = summary
     self.overwrite = overwrite
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction( self.summary )
 def __init__(self, myscraper, testing=False):
     self.myscraper = myscraper
     self.testing = testing
     if testing:
         self.destination_site = pywikibot.getSite('test', 'test')
     else:
         self.destination_site = pywikibot.getSite('commons', 'commons')
     print self.destination_site
Example 25
def Link(cat):
    site = wikipedia.getSite('mg','wiktionary')
    print cat
    pages = pagegenerators.CategorizedPageGenerator(catlib.Category(site, cat))
    checkpages = list()


    for page in pages:
        pagename = page.title()
        page_c = content = page.get()
        wikipedia.output('  >>> %s <<< '%pagename)
        try:
            defs = page.get().split('#')
            if len(defs)>2:
                continue
            else:
                defs = defs[1]
        except Exception:
            continue
        g = defs.find('\n')

        if g != -1:
            defs = defs[:g]
        else:
            pass
        if len(defs) < 2: continue
        for char in u'[]':
            defs = defs.replace(char, u'')
        wikipedia.output('\03{red}%s\03{default}'%defs)
        defs = defs.strip()
        linked_def = dolink(defs)
        wikipedia.output('\03{blue}%s\03{default}'%linked_def)
        if len(linked_def) < 1: continue

        page_c = page_c.replace(defs, linked_def)

        for link in re.findall('\[\[\[A-Za-z][\]|\[]?\]\]', page_c):
            wikipedia.output('\03{green}%s\03{default}'%link)
            page_c = page_c.replace(delink(link), linkfix(link))



        wikipedia.showDiff(page.get(), page_c)
        if (len(content) != len(page_c)):
            if checkbracks(page_c):
                pass  # page.put_async(page_c, 'fanisiana rohy')  ('fanisiana rohy' = "adding links")
            else:
                print "Ilainao ahitsiana ny rohy eo amin'io pejy io"  # "You need to fix the links on this page"
                checkpages.append(page.title())
    c = "== Lisitry ny pejy mila tsaraina ny {{subst:CURRENTDAY}} {{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}} ==\n"
    for page in checkpages:
        wikipedia.output('\03{orange}%s\03'%page)
        c+= "[[%s]]\n"%page
    p = wikipedia.Page(wikipedia.getSite('mg','wiktionary'),
                   "Mpikambana:Jagwar/pejy mila tsaraina")
    pass#p.put_async(c)
def addCoords(countryconfig, monument, coordconfig):
    '''
    Add the coordinates to article.
    '''
    countrycode = countryconfig.get('country')
    lang = countryconfig.get('lang')
    if (countrycode and lang):
        coordTemplate = coordconfig.get('coordTemplate')
        coordTemplateSyntax = coordconfig.get('coordTemplateSyntax')
        site = pywikibot.getSite(lang, 'wikipedia')

        page = pywikibot.Page(site, monument.article)
        try:
            text = page.get()
        except pywikibot.NoPage:  # First except, prevent empty pages
            return False
        except pywikibot.IsRedirectPage:  # second except, prevent redirect
            pywikibot.output(u'%s is a redirect!' % monument.article)
            return False
        except pywikibot.Error:  # third exception, take the problem and print
            pywikibot.output(u"Some error, skipping..")
            return False

        if coordTemplate in page.templates():
            return False

        newtext = text
        replCount = 1
        coordText = coordTemplateSyntax % (monument.lat, monument.lon,
                                           countrycode.upper())
        localCatName = pywikibot.getSite().namespace(WP_CATEGORY_NS)
        catStart = r'\[\[(' + localCatName + '|Category):'
        catStartPlain = u'[[' + localCatName + ':'
        replacementText = u''
        replacementText = coordText + '\n\n' + catStartPlain

        # insert coordinate template before categories
        newtext = re.sub(catStart, replacementText, newtext, replCount, flags=re.IGNORECASE)

        if text != newtext:
            try:
                source_link = common.get_source_link(
                    monument.source,
                    countryconfig.get('type'))
            except ValueError:
                source_link = ''
            comment = u'Adding template %s based on %s, # %s' % (coordTemplate, source_link, monument.id)
            pywikibot.showDiff(text, newtext)
            modPage = pywikibot.input(u'Modify page: %s ([y]/n) ?' % (monument.article))
            if modPage.lower() == 'y' or modPage == '':
                page.put(newtext, comment)
            return True
        else:
            return False
    else:
        return False
Example 27
    def categories(self):
        for page in self.generator:
            try:
                pywikibot.output(u'\n>>>> %s <<<<' % page.title())
                commons = pywikibot.getSite().image_repository()
                commonsCategory = pywikibot.Category(commons,
                                                     'Category:%s' % page.title())
                try:
                    getcommonscat = commonsCategory.get(get_redirect=True)
                    commonsCategoryTitle = commonsCategory.title()
                    categoryname = commonsCategoryTitle.split('Category:', 1)[1]
                    if page.title() == categoryname:
                        oldText = page.get()
                        text = oldText

                        # for commonscat template
                        findTemplate = re.compile(ur'\{\{[Cc]ommons')
                        s = findTemplate.search(text)
                        findTemplate2 = re.compile(ur'\{\{[Ss]isterlinks')
                        s2 = findTemplate2.search(text)
                        if s or s2:
                            pywikibot.output(u'** Already done.')
                        else:
                            text = pywikibot.replaceCategoryLinks(
                                text + u'{{commonscat|%s}}' % categoryname,
                                list(page.categories()))
                            if oldText != text:
                                pywikibot.showDiff(oldText, text)
                                if not self.acceptall:
                                    choice = pywikibot.inputChoice(
                                        u'Do you want to accept these changes?',
                                        ['Yes', 'No', 'All'], ['y', 'N', 'a'],
                                        'N')
                                    if choice == 'a':
                                        self.acceptall = True
                                if self.acceptall or choice == 'y':
                                    try:
                                        msg = i18n.twtranslate(
                                            pywikibot.getSite(), 'commons_link-cat-template-added')
                                        page.put(text, msg)
                                    except pywikibot.EditConflict:
                                        pywikibot.output(
                                            u'Skipping %s because of edit '
                                            u'conflict'
                                            % (page.title()))

                except pywikibot.NoPage:
                    pywikibot.output(u'Category does not exist in Commons!')

            except pywikibot.NoPage:
                pywikibot.output(u'Page %s does not exist?!' % page.title())
            except pywikibot.IsRedirectPage:
                pywikibot.output(u'Page %s is a redirect; skipping.'
                                 % page.title())
            except pywikibot.LockedPage:
                pywikibot.output(u'Page %s is locked?!' % page.title())
Example 28
def getLanguageLinks(text, insite=None, pageLink="[[]]",
                     template_subpage=False):
    """
    Return a dict of interlanguage links found in text.

    Dict uses language codes as keys and Page objects as values.
    Do not call this routine directly, use Page.interwiki() method
    instead.

    """
    if insite is None:
        insite = pywikibot.getSite()
    fam = insite.family
    # when interwiki links forward to another family, retrieve pages & other
    # infos there
    if fam.interwiki_forward:
        fam = pywikibot.Family(fam.interwiki_forward)
    result = {}
    # Ignore interwiki links within nowiki tags, includeonly tags, pre tags,
    # and HTML comments
    tags = ['comments', 'nowiki', 'pre', 'source']
    if not template_subpage:
        tags += ['includeonly']
    text = removeDisabledParts(text, tags)

    # This regular expression will find every link that is possibly an
    # interwiki link.
    # NOTE: language codes are case-insensitive and only consist of basic latin
    # letters and hyphens.
    # TODO: currently, we do not have any, but BCP 47 allows digits, and
    #       underscores.
    # TODO: There is no semantic difference between hyphens and
    #       underscores -> fold them.
    interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]')
    for lang, pagetitle in interwikiR.findall(text):
        lang = lang.lower()
        # Check if it really is in fact an interwiki link to a known
        # language, or if it's e.g. a category tag or an internal link
        if lang in fam.obsolete:
            lang = fam.obsolete[lang]
        if lang in fam.langs.keys():
            if '|' in pagetitle:
                # ignore text after the pipe
                pagetitle = pagetitle[:pagetitle.index('|')]
            # we want the actual page objects rather than the titles
            site = pywikibot.getSite(code=lang, fam=fam)
            try:
                result[site] = pywikibot.Page(site, pagetitle, insite=insite)
            except pywikibot.InvalidTitle:
                pywikibot.output(u'[getLanguageLinks] Text contains invalid '
                                 u'interwiki link [[%s:%s]].'
                                 % (lang, pagetitle))
                continue
    return result
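A minimal sketch of running the function above on a snippet of wikitext (assumes the same module context so that pywikibot.getSite() can resolve a default site; the sample text is illustrative):

sample = u"Some text.\n[[de:Beispiel]]\n[[fr:Exemple]]\n[[Category:Demo]]"
links = getLanguageLinks(sample)
for site, page in links.items():
    pywikibot.output(u'%s -> %s' % (site.lang, page.title()))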
Example 29
def getLanguageLinks(text, insite=None, pageLink="[[]]",
                     template_subpage=False):
    """
    Return a dict of interlanguage links found in text.

    Dict uses language codes as keys and Page objects as values.
    Do not call this routine directly, use Page.interwiki() method
    instead.

    """
    if insite is None:
        insite = pywikibot.getSite()
    fam = insite.family
    # when interwiki links forward to another family, retrieve pages & other
    # infos there
    if fam.interwiki_forward:
        fam = pywikibot.site.Family(fam.interwiki_forward)
    result = {}
    # Ignore interwiki links within nowiki tags, includeonly tags, pre tags,
    # and HTML comments
    tags = ['comments', 'nowiki', 'pre', 'source']
    if not template_subpage:
        tags += ['includeonly']
    text = removeDisabledParts(text, tags)

    # This regular expression will find every link that is possibly an
    # interwiki link.
    # NOTE: language codes are case-insensitive and only consist of basic latin
    # letters and hyphens.
    # TODO: currently, we do not have any, but BCP 47 allows digits, and
    #       underscores.
    # TODO: There is no semantic difference between hyphens and
    #       underscores -> fold them.
    interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]')
    for lang, pagetitle in interwikiR.findall(text):
        lang = lang.lower()
        # Check if it really is in fact an interwiki link to a known
        # language, or if it's e.g. a category tag or an internal link
        if lang in fam.obsolete:
            lang = fam.obsolete[lang]
        if lang in list(fam.langs.keys()):
            if '|' in pagetitle:
                # ignore text after the pipe
                pagetitle = pagetitle[:pagetitle.index('|')]
            # we want the actual page objects rather than the titles
            site = pywikibot.getSite(code=lang, fam=fam)
            try:
                result[site] = pywikibot.Page(site, pagetitle, insite=insite)
            except pywikibot.InvalidTitle:
                pywikibot.output(u'[getLanguageLinks] Text contains invalid '
                                 u'interwiki link [[%s:%s]].'
                                 % (lang, pagetitle))
                continue
    return result
Example 30
def main():
    text = """{{Proiect:Aniversările zilei/Antet}}
Tabelul de mai jos conține informații despre erorile găsite în datele de naștere și deces ale personalităților menționate în paginile zilelor și ale anilor. Comparația se face între listele de pe Wikipedia și elementele Wikidata ale personalităților respective.

Legendă:
* liniile cu fundal <span style="background-color:#ff8888">roșu</span> reprezintă nepotriviri certe (datele sunt complete în ambele părți, dar nu se potrivesc)
* liniile cu fundal <span style="background-color:#ffff88">galben</span> reprezintă intrări unde Wikidata nu are date complete
* liniile cu fundal <span style="background-color:#88ffff">albastru</span> reprezintă intrări unde Wikidata are mai multe date posibile, toate cu același rank
* liniile cu fundal <span style="background-color:#88ff88">verde</span> reprezintă diferențe de calendar (gregorian vs. julian) 

Scorul este alocat automat pe baza numărului de posibile date de naștere de la wikidata (%d/dată) și pe baza numărului de surse ce susțin data aleasă de algoritm (+%d/sursă, 0 dacă nu este aleasă nicio dată). Scorul are rolul de a prioritiza rezolvarea problemelor ușoare. '''Scor mai mare înseamnă încredere mai mare în datele de la Wikidata'''.
{| class=\"wikitable sortable\"
! Secțiune
! Articol
! Pagină aniversări
! Dată Wikipedia
! Item Wikidata
! Dată Wikidata
! Scor
""" % (MULTIPLE_DATE_PENALTY, MULTIPLE_SOURCES_BONUS)
    #day = 4
    #month = "octombrie"
    #event = "Nașteri"
    #page = pywikibot.Page(pywikibot.getSite(),  "%d %s" % (day, month))
    #import pdb
    #pdb.set_trace()
    #treat(page, day, month, event)
    #return
    for year in range(1901, time.localtime().tm_year):
        for suffix in ["", " î.Hr."]:
            page = pywikibot.Page(pywikibot.getSite(),  "%d%s" % (year, suffix))
            if not page.exists():
                continue
            if page.isRedirectPage():
                page = page.getRedirectTarget()
            if suffix != "":
                year = -year
            for event in sections.keys():
                text += treat_year(page, year, suffix, event)
    for month in months:
        for day in range(1,32):
            page = pywikibot.Page(pywikibot.getSite(),  "%d %s" % (day, month))
            if not page.exists():
                continue
            if page.isRedirectPage():
                page = page.getRedirectTarget()
            for event in sections.keys():
                text += treat_date(page, day, month, event)


    page = pywikibot.Page(pywikibot.getSite(), "Proiect:Aniversări/Erori")
    page.put(text + "|}", "Actualizare nepotriviri")
Example 31
def set_event_text(page, day, month, year, text, comment):
    if type(month) == int:
        month = months[month - 1]
    if not page:
        if year:
            page = pywikibot.Page(pywikibot.getSite(),  "%d" % year)
        else:
            page = pywikibot.Page(pywikibot.getSite(),  "%d %s" % (day, month))
    try:
        page.put(text, comment)
    except pywikibot.PageNotSaved:
        return False
    return True
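A hedged sketch of a call to the helper above (the day, month, wikitext and summary are placeholders in the spirit of the Romanian-Wikipedia example; passing the month as a string skips the months-list lookup):

ok = set_event_text(None, 4, u'octombrie', None,
                    u'== Nașteri ==\n* exemplu de intrare',   # hypothetical page text
                    u'Actualizare aniversări')                # hypothetical edit summary
if not ok:
    pywikibot.output(u'Could not save the page')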
Example 32
def main(*args):
    password = None
    sysop = False
    logall = False
    logout = False
    for arg in pywikibot.handleArgs(*args):
        if arg.startswith("-pass"):
            if len(arg) == 5:
                password = pywikibot.input(u'Password for all accounts (no characters will be shown):',
                                           password=True)
            else:
                password = arg[6:]
        elif arg == "-sysop":
            sysop = True
        elif arg == "-all":
            logall = True
        elif arg == "-force":
            pywikibot.output(u"To force a re-login, please delete the revelant lines from '%s' (or the entire file) and try again." %
                             join(config.base_dir, 'pywikibot.lwp'))
        elif arg == "-logout":
            logout = True
        else:
            pywikibot.showHelp('login')
            return
    if logall:
        if sysop:
            namedict = config.sysopnames
        else:
            namedict = config.usernames
    else:
        site = pywikibot.getSite()
        namedict = {site.family.name: {site.code: None}}
    for familyName in namedict:
        for lang in namedict[familyName]:
            try:
                site = pywikibot.getSite(code=lang, fam=familyName)
                if logout:
                    site.logout()
                else:
                    site.login()
                user = site.user()
                if user:
                    pywikibot.output(u"Logged in on %(site)s as %(user)s." % locals())
                else:
                    if logout:
                        pywikibot.output(u"Logged out of %(site)s." % locals())
                    else:
                        pywikibot.output(u"Not logged in on %(site)s." % locals())
            except NoSuchSite:
                pywikibot.output(u'%s.%s is not a valid site, please remove it'
                                 u' from your config' % (lang, familyName))
Example 33
 def __init__(self, task):
     self.site = pywikibot.getSite()
     self.trial = False
     self.trial_counter = 0
     self.trial_max = 0
     self.summary = None
     self.username = self.site.username()
     self.CONFIGURATION_PAGE = CONFIGURATION_PAGE % self.username
     self.task = task
     self.site = pywikibot.getSite()
     self.loggingEnabled = False
     self.counter = 0
     self.CHECK_CONFIG_PAGE_EVERY = CHECK_CONFIG_PAGE_EVERY
     self.args = pywikibot.handleArgs()
Example 34
 def __init__(self, task):
     self.site = pywikibot.getSite()
     self.trial = False
     self.trial_counter = 0
     self.trial_max = 0
     self.summary = None
     self.username = self.site.username()
     self.CONFIGURATION_PAGE = CONFIGURATION_PAGE % self.username
     self.task = task
     self.site = pywikibot.getSite()
     self.loggingEnabled = False
     self.counter = 0
     self.CHECK_CONFIG_PAGE_EVERY = CHECK_CONFIG_PAGE_EVERY
     self.args = pywikibot.handleArgs()
Example 35
def main():

    wikiSite = pywikibot.getSite(u'et', u'wikipedia')
    wikiPageName = u'Kasutaja:KrattBot/Muis.ee-st Commonsisse kopeerimiseks esitatud pildid'
    galPageName = u'Kasutaja:KrattBot/Muis.ee-st Commonsisse kopeeritud pildid'  
    muisUrls = getMuisUrls(wikiSite, wikiPageName)

    uploadSite = pywikibot.getSite('commons', 'commons')

    # print muisUrls
    uploadedFiles = []
    for muisUrl in muisUrls:
        answer = pywikibot.inputChoice(u'Include image %s?'
                                       % muisUrl, ['yes', 'no', 'stop'],
                                       ['Y', 'n', 's'], 'Y')
        if answer == 'y':
            aImage = getImage(muisUrl)
            if aImage:
                upFile = None
                downloadedImage = downloadPhoto(aImage.url)

                duplicates = findDuplicateImages(downloadedImage)
                if duplicates:
                    pywikibot.output(u'Found duplicate of %s image at %s' % ( muisUrl, duplicates.pop() ) )
                else:            
            
                    while True:
                        cat = pywikibot.input(
                            u"Specify a category (or press enter to end adding categories)")
                        if not cat.strip(): break
                        aImage.categories.append( cat )

                    uploadBot = upload.UploadRobot(aImage.url,
                                    description=aImage.getFullDesc(),
                                    useFilename=aImage.name,
                                    keepFilename=True,
                                    verifyDescription=False,
                                    ignoreWarning=False, 
                                    targetSite=uploadSite)
                    #print aImage.getFullDesc()
                    upFile = uploadBot.run()
                    if upFile:
                        uploadedFiles.append(upFile)
        elif answer == 's':
            break

    if uploadedFiles:
        addToGallery(wikiSite, galPageName, uploadedFiles)
Example 36
def main():
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()
    # The generator gives the pages that should be worked upon.
    gen = None
    # This temporary array is used to read the page title if one single
    # page to work on is specified by the arguments.
    pageTitleParts = []
    # If dry is True, doesn't do any real changes, but only shows
    # what would have been changed.
    dry = False
    # If always is True, don't confirm changes.
    always = False

    # Parse command line arguments
    for arg in pywikibot.handleArgs():
        if arg.startswith("-dry"):
            dry = True
        elif arg.startswith("-always"):
            always = True
        elif arg.startswith("-all"):
            genFactory.handleArg('-namespace:0')
            for tmpl in pywikibot.translate(pywikibot.getSite(), SciNameBot.taxoboxTemplates):
                genFactory.handleArg('-transcludes:%s' % tmpl)
        else:
            # check if a standard argument like
            # -start:XYZ or -ref:Asdf was given.
            if not genFactory.handleArg(arg):
                pageTitleParts.append(arg)

    if pageTitleParts != []:
        # We will only work on a single page.
        pageTitle = ' '.join(pageTitleParts)
        page = pywikibot.Page(pywikibot.getSite(), pageTitle)
        gen = iter([page])

    if not gen:
        gen = genFactory.getCombinedGenerator()
    if gen:
        # The preloading generator is responsible for downloading multiple
        # pages from the wiki simultaneously.
        gen = pagegenerators.PreloadingGenerator(gen)
        bot = SciNameBot(gen, dry, always)
        bot.run()
    else:
        pywikibot.showHelp()
Example 37
def main(args):
    """ Grab a bunch of images and tag them if they are not categorized. """
    generator = None
    genFactory = pagegenerators.GeneratorFactory()

    site = pywikibot.getSite(u'commons', u'commons')
    pywikibot.setSite(site)
    for arg in pywikibot.handleArgs():
        if arg.startswith('-yesterday'):
            generator = uploadedYesterday(site)
        elif arg.startswith('-recentchanges'):
            generator = recentChanges(site=site, delay=120)
        else:
            genFactory.handleArg(arg)
    if not generator:
        generator = genFactory.getCombinedGenerator()
    if not generator:
        pywikibot.output(u'You have to specify the generator you want to use '
                         u'for the program!')
    else:
        pregenerator = pagegenerators.PreloadingGenerator(generator)
        for page in pregenerator:
            if page.exists() and (page.namespace() == 6) \
               and (not page.isRedirectPage()):
                if isUncat(page):
                    addUncat(page)
Example 38
 def setpage(self):
     """Sets page and page title"""
     site = pywikibot.getSite()
     pageTitle = self.options.page or pywikibot.input(u"Page to edit:")
     self.page = pywikibot.Page(pywikibot.Link(pageTitle, site))
     if not self.options.edit_redirect and self.page.isRedirectPage():
         self.page = self.page.getRedirectTarget()
Example 39
def job_thread(queue, cache):
    while True:
        title, codelang, user, t, tools, conn = queue.get()

        time1 = time.time()
        out = ''
        try:
            mysite = pywikibot.getSite(codelang, 'wikisource')
        except:
            out = ret_val(E_ERROR, "site error: " + repr(codelang))
            mysite = False

        if mysite:
            out = do_extract(mysite, title, user, codelang, cache)

        if tools and conn:
            tools.send_reply(conn, out)
            conn.close()

        time2 = time.time()
        print(
            date_s(time2) + title + ' ' + user + " " + codelang + " (%.2f)" %
            (time2 - time1)).encode('utf-8')

        queue.remove()
Example 40
    def run(self):
        comment = i18n.twtranslate(self.site, 'noreferences-add-tag')
        pywikibot.setAction(comment)

        for page in self.generator:
            # Show the title of the page we're working on.
            # Highlight the title in purple.
            pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" %
                             page.title())
            try:
                text = page.get()
            except pywikibot.NoPage:
                pywikibot.output(u"Page %s does not exist?!" %
                                 page.title(asLink=True))
                continue
            except pywikibot.IsRedirectPage:
                pywikibot.output(u"Page %s is a redirect; skipping." %
                                 page.title(asLink=True))
                continue
            except pywikibot.LockedPage:
                pywikibot.output(u"Page %s is locked?!" %
                                 page.title(asLink=True))
                continue
            if pywikibot.getSite().sitename(
            ) == 'wikipedia:en' and page.isIpEdit():
                pywikibot.output(
                    u"Page %s is edited by IP. Possible vandalized" %
                    page.title(asLink=True))
                continue
            if self.lacksReferences(text):
                newText = self.addReferences(text)
                self.save(page, newText)
Example 41
 def __iter__(self):
     import xmlreader
     dump = xmlreader.XmlDump(self.xmlFilename)
     for entry in dump.parse():
         text = pywikibot.removeDisabledParts(entry.text)
         if self.refR.search(text) and not self.referencesR.search(text):
             yield pywikibot.Page(pywikibot.getSite(), entry.title)
Example 42
 def setpage(self):
     """Sets page and page title"""
     site = pywikibot.getSite()
     pageTitle = self.options.page or pywikibot.input(u"Page to edit:")
     self.page = pywikibot.Page(site, pageTitle)
     if not self.options.edit_redirect and self.page.isRedirectPage():
         self.page = self.page.getRedirectTarget()
Example 43
def getAllUnreviewedinCat(cat_name, depth=1, sortby='title', exclude=''):
    """Get all unreviewed pages in a category
    
    This function uses the toolserver to get these pages, using the tool by hroest 
    """

    webpage = hroest_url + 'flagged.php'
    cat_temp = Template('%s?category=$cat&depth=$depth' % webpage +
                        '&sortby=$sortby&exclude=$exclude&doit=Los!')
    webpage = cat_temp.substitute(cat=urllib.quote_plus(cat_name),
                                  depth=depth,
                                  sortby=sortby,
                                  exclude=exclude)
    data = send_request(webpage)

    numberArticles = '(\d*) nachzusichtende Artikel gefunden.'
    m = re.search(numberArticles, data)
    number = int(m.group(1))
    data = data[:m.end()]

    unstable = []
    for q in re.finditer("target\" href=\"http://de.wikipedia.org/" + \
            "w/index.php\?title=(.*?)\&diffonly=\d*\&oldid=(\d*)", data):
        thisTitle = q.group(1)
        thisID = -1
        stable_rev = q.group(2)
        last_rev = -1
        pending_since = -1
        page = UnreviewedPage(pywikibot.Page(pywikibot.getSite(),
                                             thisTitle), thisTitle, thisID,
                              stable_rev, last_rev, pending_since)
        unstable.append(page)

    return unstable
Example 44
def getAllUnreviewed():
    """Get all unreviewed pages

    This function uses the toolserver to get these pages, using the tool by hroest 
    """

    webpage = hroest_url + 'cgi-bin/bot_only/all_unflagged.py'
    try:
        data = send_request(webpage)
    except:
        #try again, at least now the query is in MySQL cache
        data = send_request(webpage)

    articles = data.split('\n')
    numberArticles = len(articles)

    unstable = []
    for a in articles:
        if len(a) == 0: continue
        mysplit = a.split('||;;;')
        thisTitle = mysplit[0].strip()
        thisID = mysplit[1].strip()
        stable_rev = mysplit[2].strip()
        last_rev = mysplit[3].strip()
        pending_since = mysplit[4].strip()
        unstable.append([
            pywikibot.Page(pywikibot.getSite(), thisTitle), thisTitle, thisID,
            stable_rev, last_rev, pending_since
        ])
    return unstable, [], []
Example 45
def _magnus_getAllUnreviewedinCat(cat_name, depth=1):
    """Get all unreviewed pages in a category; using magnus tool
    """

    magnus = 'http://toolserver.org/~magnus/deep_out_of_sight.php'
    cat_temp = Template('%s?category=$cat&depth=$depth&doit=Los!' % magnus)
    webpage = cat_temp.substitute(cat=cat_name, depth=depth)
    data = send_request(webpage)

    numberArticles = '(\d*) nachzusichtende Artikel gefunden.'
    m = re.search(numberArticles, data)
    number = int(m.group(1))
    data = data[:m.end()]

    unstable = []
    for q in re.finditer("target\" href=\"http://de.wikipedia.org/" + \
            "w/index.php\?title=(.*?)\&diffonly=\d*\&oldid=(\d*)", data):
        thisTitle = q.group(1)
        thisID = -1
        stable_rev = q.group(2)
        last_rev = -1
        pending_since = -1
        unstable.append([
            pywikibot.Page(pywikibot.getSite(), thisTitle), thisTitle, thisID,
            stable_rev, last_rev, pending_since
        ])

    return unstable, [], []
Example 46
def main():
    #page generator
    gen = None
    # If the user chooses to work on a single page, this temporary array is
    # used to read the words from the page title. The words will later be
    # joined with spaces to retrieve the full title.
    pageTitle = []
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if not genFactory.handleArg(arg):
            pageTitle.append(arg)

    if pageTitle:
        # work on a single page
        page = pywikibot.Page(pywikibot.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        pywikibot.showHelp('inline_images')
    else:
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = InlineImagesRobot(preloadingGen)
        bot.run()
Esempio n. 47
0
    def run(self):
        for page in self.generator:
            try:
                # get the page, and save it using the unmodified text.
                # whether or not getting a redirect throws an exception
                # depends on the variable self.touch_redirects.
                text = page.get()
                originalText = text
                for url in weblinkchecker.weblinksIn(text,
                                                     withoutBracketed=True):
                    filename = url.split('/')[-1]
                    description = pywikibot.translate(pywikibot.getSite(),
                                                      msg) % url
                    bot = upload.UploadRobot(url, description=description)
                    # TODO: check duplicates
                    #filename = bot.uploadImage()
                    #if filename:
                    #    text = text.replace(url, u'[[Image:%s]]' % filename)
                # only save if there were changes
                #if text != originalText:
                #    page.put(text)
            except pywikibot.NoPage:
                print "Page %s does not exist?!" % page.title(asLink=True)
            except pywikibot.IsRedirectPage:
                print "Page %s is a redirect; skipping." \
                      % page.title(asLink=True)
            except pywikibot.LockedPage:
                print "Page %s is locked?!" % page.title(asLink=True)
Esempio n. 48
0
def main():

    frwikt = pywikibot.getSite('fr', 'wiktionary')

    lista = []
    inp = codecs.open('lista_aneksy', encoding='utf-8')
    usun_poczatek = re.compile('(.*?)----', re.DOTALL)
    usun_liens = re.compile('({{Liens.*?\|(\s*|)mdl=[^}]*?}})', re.DOTALL)

    for line in inp:
        lista.append(line.split())

    for a in lista:
        page = pywikibot.Page(frwikt, 'Modèle:fr-conj-3-%s' % (a[0]))
        text = page.get()
        text_przed = '<noinclude>[[Kategoria:Język francuski - szablony koniugacji|3-%s]] </noinclude>\n<noinclude>Koniugacja czasowników kończących się na \'\'\'\'\'-%s\'\'\'\'\' :\n:<nowiki>{{</nowiki>fr-koniugacja-3-%s | \'\'<prefiks>\'\' | \'\'<IPA prefiksu>\'\' | \'\'<parametry opcjonalne>\'\' }}\n*\'\'<prefiks>\'\' : to, co poprzedza \'\'-%s\'\'\n*\'\'<IPA prefiksu>\'\' : wymowa prefiksu w IPA.\n*\'\'<parametry dodatkowe>\'\' :\n**<code>\'=tak<code> jeśli słowo zaczyna się samogłoską\n**<code>aux-être=tak<code> jeśli posiłkowym czasownikiem jest [[être]]\n----' % (a[0], a[0], a[0], a[0])
        text = re.sub(usun_poczatek, '', text)
        text = re.sub(usun_liens, '', text)
        text = re.sub('fr-conj', 'fr-koniugacja', text)
        text = re.sub('fr-koniugacja/Tableau', 'fr-koniugacja/Tabela', text)
        text = re.sub('fr-koniugacja/Tableau-composé', 'fr-koniugacja/Tabela-złożone', text)
        text = re.sub('gris', 'pogrub', text)
        text = re.sub('{{conj-fr-usuel', '{{fr-koniugacja-zwyczajowe', text)
        text = re.sub('<noinclude>{{Documentation}}</noinclude>', '', text)
        text = re.sub('\|(\s*|)cat={{{clé|{{{1|}}}}}}%s' % (a[0]), '', text)
        text = re.sub('Conjugaison courante', 'Koniugacja powszechna', text)
        text = re.sub('Conjugaison alternative', 'Koniugacja alternatywna', text)
        final = text_przed + text
        print(final)

        filename = "szablony/fr-koniugacja-3-%s" % (a[0])

        file = open(filename, 'w')
        file.write(final.encode("utf-8"))
        file.close()
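
As a side note, the manual encode step can be avoided by writing through codecs.open, the same module this example already uses for reading (a minimal alternative sketch):

import codecs

out = codecs.open(filename, 'w', encoding='utf-8')
out.write(final)
out.close()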
Esempio n. 49
0
def main():
    global mysite, linktrail, page
    start = []
    for arg in pywikibot.handleArgs():
        start.append(arg)
    if start:
        start = " ".join(start)
    else:
        start = "!"
    mysite = pywikibot.getSite()
    linktrail = mysite.linktrail()
    try:
        generator = pagegenerators.CategorizedPageGenerator(
            mysite.disambcategory(), start=start)
    except pywikibot.NoPage:
        pywikibot.output(
            "The bot does not know the disambiguation category for your wiki.")
        raise
    # only work on articles
    generator = pagegenerators.NamespaceFilterPageGenerator(generator, [0])
    generator = pagegenerators.PreloadingGenerator(generator)
    pagestodo = []
    pagestoload = []
    for page in generator:
        if page.isRedirectPage():
            continue
        linked = page.linkedPages()
        pagestodo.append((page, linked))
        pagestoload += linked
        if len(pagestoload) > 49:
            pywikibot.getall(mysite, pagestoload)
            for page, links in pagestodo:
                workon(page, links)
            pagestoload = []
            pagestodo = []
Esempio n. 50
0
def main():
    genFactory = pagegenerators.GeneratorFactory()
    commandline_arguments = list()
    templateTitle = u''
    for arg in pywikibot.handleArgs():
        if arg.startswith('-template'):
            if len(arg) == 9:
                templateTitle = pywikibot.input(
                    u'Please enter the template to work on:')
            else:
                templateTitle = arg[10:]
        elif genFactory.handleArg(arg):
            continue
        else:
            commandline_arguments.append(arg)

    if len(commandline_arguments) % 2 or not templateTitle:
        raise ValueError  # or something.
    fields = dict()

    for i in xrange(0, len(commandline_arguments), 2):
        fields[commandline_arguments[i]] = commandline_arguments[i + 1]
    if templateTitle:
        gen = pagegenerators.ReferringPageGenerator(pywikibot.Page(
            pywikibot.getSite(), "Template:%s" % templateTitle),
                                                    onlyTemplateInclusion=True)
    else:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        # TODO: Build a transcluding generator based on templateTitle
        return

    bot = HarvestRobot(gen, templateTitle, fields)
    bot.run()
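
For illustration, with hypothetical leftover arguments such as population P1082 country P17 on the command line, the pairing loop above builds a mapping from template parameter names to target fields:

# hypothetical arguments left after -template and the generator options
commandline_arguments = ['population', 'P1082', 'country', 'P17']
fields = dict()
for i in xrange(0, len(commandline_arguments), 2):
    fields[commandline_arguments[i]] = commandline_arguments[i + 1]
# fields == {'population': 'P1082', 'country': 'P17'}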
Esempio n. 51
0
    def __iter__(self):
        import xmlreader
        dump = xmlreader.XmlDump(self.xmlFilename)
        for entry in dump.parse():
            text = pywikibot.removeDisabledParts(entry.text)
            if self.refR.search(text) and not self.referencesR.search(text):
                yield pywikibot.Page(pywikibot.getSite(), entry.title)
Esempio n. 52
0
    def log(self, url, error, containingPage, archiveURL):
        """
        Logs an error report to a text file in the deadlinks subdirectory.
        """
        site = pywikibot.getSite()
        if archiveURL:
            errorReport = u'* %s ([%s archive])\n' % (url, archiveURL)
        else:
            errorReport = u'* %s\n' % url
        for (pageTitle, date, error) in self.historyDict[url]:
            # ISO 8601 formulation
            isoDate = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(date))
            errorReport += "** In [[%s]] on %s, %s\n" % (pageTitle, isoDate,
                                                         error)
        pywikibot.output(u"** Logging link for deletion.")
        txtfilename = pywikibot.config.datafilepath(
            'deadlinks', 'results-%s-%s.txt' % (site.family.name, site.lang))
        txtfile = codecs.open(txtfilename, 'a', 'utf-8')
        self.logCount += 1
        if self.logCount % 30 == 0:
            # insert a caption
            txtfile.write('=== %s ===\n' % containingPage.title()[:3])
        txtfile.write(errorReport)
        txtfile.close()

        if self.reportThread and not containingPage.isTalkPage():
            self.reportThread.report(url, errorReport, containingPage,
                                     archiveURL)
Esempio n. 53
0
def main():

    data = '20110310'

    site = pywikibot.getSite()
    cat = Category(site, 'Kategoria:francuski (indeks)')
    lista = pagegenerators.CategorizedPageGenerator(cat)
    #lista_stron1 = xmlreader.XmlDump('plwiktionary-%s-pages-articles.xml' % data)

    #lista = xmlreader.XmlDump.parse(lista_stron1)

    for a in lista:
        h = Haslo(a.title())
        #h = HasloXML(a.title, a.text)
        if h.type != 4 and ' ' in h.title:
            h.langs()
            for c in h.list_lang:
                c.pola()
                if c.type != 2 and c.lang == 'hiszpański':
                    if ('rzeczownik' in c.znaczenia.tresc) and (
                            'rzeczownika' not in c.znaczenia.tresc):
                        print('\n' + h.title)
                        text = '*[[%s]]\n' % h.title
                        file = open("log/rzeczownik.txt", 'a')
                        file.write(text.encode("utf-8"))
                        file.close()
Esempio n. 54
0
def main():
    global mysite, linktrail, page
    start = []
    for arg in pywikibot.handleArgs():
        start.append(arg)
    if start:
        start = " ".join(start)
    else:
        start = "!"
    mysite = pywikibot.getSite()
    linktrail = mysite.linktrail()
    try:
        generator = pagegenerators.CategorizedPageGenerator(
            mysite.disambcategory(), start=start)
    except pywikibot.NoPage:
        pywikibot.output(
            "The bot does not know the disambiguation category for your wiki.")
        raise
    # only work on articles
    generator = pagegenerators.NamespaceFilterPageGenerator(generator, [0])
    generator = pagegenerators.PreloadingGenerator(generator)
    pagestodo = []
    pagestoload = []
    for page in generator:
        if page.isRedirectPage():
            continue
        linked = page.linkedPages()
        pagestodo.append((page, linked))
        pagestoload += linked
        if len(pagestoload) > 49:
            # PreloadingGenerator is lazy; iterate it so the batch of linked
            # pages is actually fetched before workon() uses them
            for loaded in pagegenerators.PreloadingGenerator(pagestoload):
                pass
            for page, links in pagestodo:
                workon(page, links)
            pagestoload = []
            pagestodo = []
Esempio n. 55
0
def write_templates(res):
    import pywikibot
    from common.pywikibot_utils import safe_put

    for dom in ['fr', 'en', 'bn', 'pl']:
        if dom == 'fr':
            sep = ' '
        elif dom == 'en':
            sep = ','
        else:
            sep = ''

        num, num_q0, num_q2, num_q3, num_q4, num_tr, num_texts, num_disambig = \
            decode_res(res[dom])
        percent = num_tr * 100. / (num_texts - num_disambig)
        num_q1 = num - (num_q0 + num_q2 + num_q3 + num_q4)

        site = pywikibot.getSite(dom, fam='wikisource')
        page = pywikibot.Page(site, "Template:PAGES_NOT_PROOFREAD")
        safe_put(page, spaced_int(num_q1, sep), "")
        page = pywikibot.Page(site, "Template:ALL_PAGES")
        safe_put(page, spaced_int(num, sep), "")
        page = pywikibot.Page(site, "Template:PR_TEXTS")
        safe_put(page, spaced_int(num_tr, sep), "")
        page = pywikibot.Page(site, "Template:ALL_TEXTS")
        safe_put(page, spaced_int(num_texts - num_disambig, sep), "")
        page = pywikibot.Page(site, "Template:PR_PERCENT")
        safe_put(page, "%.2f" % percent, "")