def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):
    text = pywikibot.removeDisabledParts(text)

    # MediaWiki parses templates before parsing external links. Thus, there
    # might be a | or a } directly after a URL which does not belong to
    # the URL itself.

    # First, remove the curly braces of inner templates:
    nestedTemplateR = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')
    while nestedTemplateR.search(text):
        text = nestedTemplateR.sub(r'{{\1 \2 \3}}', text)

    # Then blow up the templates with spaces so that the | and }} will not
    # be regarded as part of the link.
    templateWithParamsR = re.compile(r'{{([^}]*?[^ ])\|([^ ][^}]*?)}}',
                                     re.DOTALL)
    while templateWithParamsR.search(text):
        text = templateWithParamsR.sub(r'{{ \1 | \2 }}', text)

    linkR = pywikibot.compileLinkR(withoutBracketed, onlyBracketed)

    # Remove HTML comments in URLs as well as URLs in HTML comments.
    # Also remove text inside nowiki links etc.
    text = pywikibot.removeDisabledParts(text)
    for m in linkR.finditer(text):
        yield m.group('url')
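A minimal standalone sketch of the template-padding trick used above, with only the re module; the sample wikitext and the pad_templates helper name are illustrative, not part of pywikibot.

import re

def pad_templates(text):
    # Collapse inner templates first so the outer {{...}} can be matched safely.
    nested = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')
    while nested.search(text):
        text = nested.sub(r'{{\1 \2 \3}}', text)
    # Pad "|" and "}}" with spaces so a URL regex will not swallow them.
    with_params = re.compile(r'{{([^}]*?[^ ])\|([^ ][^}]*?)}}', re.DOTALL)
    while with_params.search(text):
        text = with_params.sub(r'{{ \1 | \2 }}', text)
    return text

sample = u'{{cite web|url=http://example.org/page|title=Example}}'
print(pad_templates(sample))
# the URL is now followed by a space, so "|title=..." is no longer glued to it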
def __iter__(self):
    import xmlreader
    dump = xmlreader.XmlDump(self.xmlFilename)
    for entry in dump.parse():
        text = pywikibot.removeDisabledParts(entry.text)
        if self.refR.search(text) and not self.referencesR.search(text):
            yield pywikibot.Page(pywikibot.getSite(), entry.title)
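A self-contained sketch of the same filtering idea, applied to plain (title, text) pairs instead of an XML dump; the two regexes are illustrative stand-ins for self.refR and self.referencesR.

import re

refR = re.compile(r'<ref[ >]', re.IGNORECASE)
referencesR = re.compile(r'<references\s*/?\s*>|\{\{\s*reflist', re.IGNORECASE)

def pages_missing_references(entries):
    # entries: iterable of (title, wikitext) pairs, e.g. parsed from a dump
    for title, text in entries:
        if refR.search(text) and not referencesR.search(text):
            yield title

sample = [
    (u'Good article', u'Fact.<ref>Source</ref>\n== References ==\n<references/>'),
    (u'Broken article', u'Fact.<ref>Source</ref>'),
]
print(list(pages_missing_references(sample)))   # only 'Broken article' is yielded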
def getLinks(wtext):
    # adapted from linkedPages()
    # http://svn.wikimedia.org/svnroot/pywikipedia/trunk/pywikipedia/wikipedia.py
    links = []
    wtext = wikipedia.removeLanguageLinks(wtext, site)
    wtext = wikipedia.removeCategoryLinks(wtext, site)
    # remove HTML comments, pre, nowiki, and includeonly sections
    # from text before processing
    wtext = wikipedia.removeDisabledParts(wtext)
    # resolve {{ns:-1}} or {{ns:Help}}
    wtext = site.resolvemagicwords(wtext)
    for match in Rlink.finditer(wtext):
        title = match.group('title')
        title = title.replace("_", " ").strip(" ")
        if title.startswith("#"):  # this is an internal section link
            continue
        if not site.isInterwikiLink(title):
            if title.startswith("#"):  # [[#intrasection]] same article
                continue
            # removing sections [[other article#section|blabla]]
            title = title.split('#')[0]
            title = '%s%s' % (title[:1].upper(), title[1:])  # first up
            title = title.strip()
            if title.startswith(":") or title.startswith("File:") or \
                    title.startswith("Image:") or title.startswith("Category:"):
                # files, cats, etc
                continue
            if title and title not in links:
                links.append(title)
    return links
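A standalone sketch of the same link normalisation (underscores to spaces, section anchors stripped, first letter upper-cased, files/categories skipped), assuming a simple [[...]] regex in place of the Rlink pattern and site helpers used above.

import re

wikilinkR = re.compile(r'\[\[(?P<title>[^\]\|#]+)(?:#[^\]\|]*)?(?:\|[^\]]*)?\]\]')

def get_links(wtext):
    links = []
    for match in wikilinkR.finditer(wtext):
        title = match.group('title').replace('_', ' ').strip()
        if not title or title.startswith(('#', ':', 'File:', 'Image:', 'Category:')):
            continue
        title = title[:1].upper() + title[1:]      # first letter upper-cased
        if title not in links:
            links.append(title)
    return links

print(get_links(u'See [[foo_bar#History|history]], [[File:X.png]] and [[Baz]].'))
# ['Foo bar', 'Baz']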
def lacksReferences(self, text, verbose=True):
    """Checks whether or not the page is lacking a references tag."""
    oldTextCleaned = pywikibot.removeDisabledParts(text)
    if self.referencesR.search(oldTextCleaned) or \
       self.referencesTagR.search(oldTextCleaned):
        if verbose:
            pywikibot.output(u'No changes necessary: references tag found.')
        return False
    elif self.referencesTemplates:
        templateR = u'{{(' + u'|'.join(self.referencesTemplates) + ')'
        if re.search(templateR, oldTextCleaned, re.IGNORECASE | re.UNICODE):
            if verbose:
                pywikibot.output(
                    u'No changes necessary: references template found.')
            return False
    if not self.refR.search(oldTextCleaned):
        if verbose:
            pywikibot.output(u'No changes necessary: no ref tags found.')
        return False
    else:
        if verbose:
            pywikibot.output(u'Found ref without references.')
        return True
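A minimal standalone version of the same decision, assuming illustrative regexes for refR and referencesR and an invented list of references templates; only the control flow mirrors the method above.

import re

refR = re.compile(r'<ref[ >]', re.IGNORECASE)
referencesR = re.compile(r'<references\s*/?\s*>', re.IGNORECASE)
referencesTemplates = [u'Reflist', u'Refs']        # illustrative template names
templateR = re.compile(u'{{(' + u'|'.join(referencesTemplates) + ')',
                       re.IGNORECASE | re.UNICODE)

def lacks_references(text):
    if referencesR.search(text) or templateR.search(text):
        return False                    # a references tag/template is present
    if not refR.search(text):
        return False                    # no ref tags, nothing to collect
    return True                         # refs exist but no references section

print(lacks_references(u'Fact.<ref>x</ref>'))                    # True
print(lacks_references(u'Fact.<ref>x</ref>\n{{Reflist}}'))       # False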
def procesPage(self, page): """ Proces a single page """ item = pywikibot.DataPage(page) pywikibot.output('Processing %s' % page) if not item.exists(): pywikibot.output('%s doesn\'t have a wikidata item :(' % page) #TODO FIXME: We should provide an option to create the page else: pagetext = page.get() pagetext = pywikibot.removeDisabledParts(pagetext) templates = pywikibot.extract_templates_and_params(pagetext) for (template, fielddict) in templates: # We found the template we were looking for if template.replace(u'_', u' ') == self.templateTitle: for field, value in fielddict.items(): # This field contains something useful for us if field in self.fields: # Check if the property isn't already set claim = self.fields[field] if claim in item.get().get('claims'): pywikibot.output( u'A claim for %s already exists. Skipping' % (claim,)) # TODO FIXME: This is a very crude way of dupe # checking else: # Try to extract a valid page match = re.search(re.compile( r'\[\[(?P<title>[^\]|[#<>{}]*)(\|.*?)?\]\]'), value) if match: try: link = match.group(1) linkedPage = pywikibot.Page(self.site, link) if linkedPage.isRedirectPage(): linkedPage = linkedPage.getRedirectTarget() linkedItem = pywikibot.DataPage(linkedPage) pywikibot.output('Adding %s --> %s' % (claim, linkedItem.getID())) if self.setSource(self.site().language()): item.editclaim( str(claim), linkedItem.getID(), refs={self.setSource( self.site().language())}) else: item.editclaim(str(claim), linkedItem.getID()) except pywikibot.NoPage: pywikibot.output( "[[%s]] doesn't exist so I can't link to it" % linkedItem.title())
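The template field values above are mined for a wikilink with the regex r'\[\[(?P<title>[^\]|[#<>{}]*)(\|.*?)?\]\]'; a small standalone check of what that pattern captures (the sample values are made up).

import re

linkR = re.compile(r'\[\[(?P<title>[^\]|[#<>{}]*)(\|.*?)?\]\]')

for value in (u'[[Douglas Adams]]', u'[[Douglas Adams|the author]]', u'plain text'):
    match = linkR.search(value)
    if match:
        print(match.group('title'))    # the page that would be linked on Wikidata
    else:
        print('no wikilink in: ' + value)
# Douglas Adams
# Douglas Adams
# no wikilink in: plain text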
def standardizePageFooter(self, text): """ Makes sure that interwiki links, categories and star templates are put to the correct position and into the right order. This combines the old instances standardizeInterwiki and standardizeCategories The page footer has the following section in that sequence: 1. categories 2. additional information depending on local site policy 3. stars templates for featured and good articles 4. interwiki links """ starsList = [ u'bueno', u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed', u'destacado', u'destaca[tu]', u'enllaç[ _]ad', u'enllaz[ _]ad', u'leam[ _]vdc', u'legătură[ _]a[bcf]', u'liamm[ _]pub', u'lien[ _]adq', u'lien[ _]ba', u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt', u'liên[ _]kết[ _]chọn[ _]lọc', u'ligam[ _]adq', u'ligoelstara', u'ligoleginda', u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km', u'link[ _]sm', u'linkfa', u'na[ _]lotura', u'nasc[ _]ar', u'tengill[ _][úg]g', u'ua', u'yüm yg', u'רא', u'وصلة مقالة جيدة', u'وصلة مقالة مختارة', ] categories = None interwikiLinks = None allstars = [] hasCommentLine = False # The PyWikipediaBot is no longer allowed to touch categories on the German Wikipedia. # See http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/bis_2006#Position_der_Personendaten_am_.22Artikelende.22 # ignoring nn-wiki of cause of the comment line above iw section if not self.template and not '{{Personendaten' in text: categories = pywikibot.getCategoryLinks(text, site=self.site) if not self.talkpage: # and pywikibot.calledModuleName() <> 'interwiki': subpage = False if self.template: loc = None try: tmpl, loc = moved_links[self.site.lang] del tmpl except KeyError: pass if loc != None and loc in self.title: subpage = True interwikiLinks = pywikibot.getLanguageLinks( text, insite=self.site, template_subpage=subpage) # Removing the interwiki text = pywikibot.removeLanguageLinks(text, site=self.site) # Removing the stars' issue starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile( '(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: if pywikibot.verbose: print found text = regex.sub('', text) allstars += found # nn got a message between the categories and the iw's # and they want to keep it there, first remove it if self.site.language() == 'nn': regex = re.compile( '(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages alphabetically by name\) ?-->)' ) found = regex.findall(text) if found: if pywikibot.verbose: print found hasCommentLine = True text = regex.sub('', text) # Adding categories if categories: text = pywikibot.replaceCategoryLinks(text, categories, site=self.site) # Put the nn iw message back if self.site.language() == 'nn' and not self.talkpage and ( interwikiLinks or hasCommentLine): text = text + '\r\n\r\n' + nn_iw_msg # Adding stars templates if allstars: text = text.strip() + self.site.family.interwiki_text_separator allstars.sort() for element in allstars: text += '%s\r\n' % element.strip() if pywikibot.verbose: pywikibot.output(u'%s' % element.strip()) # Adding the interwiki if interwikiLinks: text = pywikibot.replaceLanguageLinks(text, interwikiLinks, site=self.site, template=self.template, template_subpage=subpage) return text
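A standalone illustration of the "stars" handling used above: templates matching starsList are cut out of the text and re-appended, sorted, at the footer. The sample template names, text and separator are illustrative.

import re

starsList = [u'linkfa', u'link[ _]fa']          # subset, for illustration only
text = u'Article body.\n{{Link FA|en}}\nMore body.\n[[en:Foo]]\n'

allstars = []
for star in starsList:
    regex = re.compile(r'(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I)
    found = regex.findall(text)
    if found:
        text = regex.sub('', text)
        allstars += found

if allstars:
    allstars.sort()
    text = text.strip() + '\r\n\r\n'
    for element in allstars:
        text += '%s\r\n' % element.strip()

print(text)
# {{Link FA|en}} now sits at the bottom, after the article body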
def run(self): """ Runs the Bot """ pywikibot.setAction(pywikibot.translate(self.site, msg)) try: deadLinks = codecs.open(listof404pages, 'r', 'latin_1').read() except IOError: pywikibot.output( 'You need to download http://www.twoevils.org/files/wikipedia/404-links.txt.gz and to ungzip it in the same directory') raise socket.setdefaulttimeout(30) editedpages = 0 for page in self.generator: try: # Load the page's text from the wiki new_text = page.get() if not page.canBeEdited(): pywikibot.output(u"You can't edit page %s" % page.title(asLink=True)) continue except pywikibot.NoPage: pywikibot.output(u'Page %s not found' % page.title(asLink=True)) continue except pywikibot.IsRedirectPage: pywikibot.output(u'Page %s is a redirect' % page.title(asLink=True)) continue for match in linksInRef.finditer(pywikibot.removeDisabledParts(page.get())): #for each link to change link = match.group(u'url') #debugging purpose #print link if u'jstor.org' in link: #TODO: Clean URL blacklist continue ref = RefLink(link, match.group('name')) f = None try: socket.setdefaulttimeout(20) try: f = urllib2.urlopen(ref.url.decode("utf8")) except UnicodeError: ref.url = urllib2.quote(ref.url.encode("utf8"),"://") f = urllib2.urlopen(ref.url) #Try to get Content-Type from server headers = f.info() contentType = headers.getheader('Content-Type') if contentType and not self.MIME.search(contentType): if ref.link.lower().endswith('.pdf') and \ not self.ignorepdf: # If file has a PDF suffix self.getPDFTitle(ref, f) else: pywikibot.output( u'\03{lightyellow}WARNING\03{default} : media : %s ' % ref.link) if ref.title: if not re.match( '(?i) *microsoft (word|excel|visio)', ref.title): ref.transform(ispdf=True) repl = ref.refTitle() else: pywikibot.output( '\03{lightyellow}WARNING\03{default} : PDF title blacklisted : %s ' % ref.title) repl = ref.refLink() else: repl = ref.refLink() new_text = new_text.replace(match.group(), repl) continue # Get the real url where we end (http redirects !) redir = f.geturl() if redir != ref.link and \ domain.findall(redir) == domain.findall(link): if soft404.search(redir) and \ not soft404.search(ref.link): pywikibot.output( u'\03{lightyellow}WARNING\03{default} : Redirect 404 : %s ' % ref.link) continue if dirIndex.match(redir) and \ not dirIndex.match(ref.link): pywikibot.output( u'\03{lightyellow}WARNING\03{default} : Redirect to root : %s ' % ref.link) continue # uncompress if necessary if headers.get('Content-Encoding') in ('gzip', 'x-gzip'): # XXX: small issue here: the whole page is downloaded # through f.read(). It might fetch big files/pages. # However, truncating an encoded gzipped stream is not # an option, for unzipping will fail. 
compressed = StringIO.StringIO(f.read()) f = gzip.GzipFile(fileobj=compressed) # Read the first 1,000,000 bytes (0.95 MB) linkedpagetext = f.read(1000000) socket.setdefaulttimeout(None) except UnicodeError: #example : http://www.adminet.com/jo/20010615¦/ECOC0100037D.html # in [[fr:Cyanure]] pywikibot.output( u'\03{lightred}Bad link\03{default} : %s in %s' % (ref.url, page.title(asLink=True))) continue except urllib2.HTTPError, e: pywikibot.output(u'HTTP error (%s) for %s on %s' % (e.code, ref.url, page.title(asLink=True)), toStdout = True) # 410 Gone, indicates that the resource has been purposely # removed if e.code == 410 or \ (e.code == 404 and (u'\t%s\t' % ref.url in deadLinks)): repl = ref.refDead() new_text = new_text.replace(match.group(), repl) continue except (urllib2.URLError, socket.error, IOError, httplib.error), e: #except (urllib2.URLError, socket.timeout, ftplib.error, httplib.error, socket.error), e: pywikibot.output(u'Can\'t retrieve page %s : %s' % (ref.url, e)) continue except ValueError: #Known bug of httplib, google for : #"httplib raises ValueError reading chunked content" continue
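The gzip branch above buffers the whole response because a truncated gzip stream cannot be unzipped. A minimal standalone sketch of that step, using io.BytesIO instead of the Python 2 StringIO in the snippet; the function name is illustrative.

import gzip
import io

def read_response_body(body, content_encoding, limit=1000000):
    # body: raw bytes of the HTTP response. Decompress only when the server
    # declared gzip encoding, then keep at most `limit` bytes.
    if content_encoding in ('gzip', 'x-gzip'):
        return gzip.GzipFile(fileobj=io.BytesIO(body)).read(limit)
    return body[:limit]

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
    gz.write(b'<html><title>Example page</title></html>')
print(read_response_body(buf.getvalue(), 'gzip'))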
def add_text(page=None, addText=None, summary=None, regexSkip=None, regexSkipUrl=None, always=False, up=False, putText=True, oldTextGiven=None, reorderEnabled=True, create=False): # When a page is tagged as "really well written" it has a star in the # interwiki links. This is a list of all the templates used (in regex # format) to make the stars appear. starsList = [ u'bueno', u'bom interwiki', u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed', u'destacado', u'destaca[tu]', u'enllaç[ _]ad', u'enllaz[ _]ad', u'leam[ _]vdc', u'legătură[ _]a[bcf]', u'liamm[ _]pub', u'lien[ _]adq', u'lien[ _]ba', u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt', u'liên[ _]kết[ _]chọn[ _]lọc', u'ligam[ _]adq', u'ligoelstara', u'ligoleginda', u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km', u'link[ _]sm', u'linkfa', u'na[ _]lotura', u'nasc[ _]ar', u'tengill[ _][úg]g', u'ua', u'yüm yg', u'רא', u'وصلة مقالة جيدة', u'وصلة مقالة مختارة', ] errorCount = 0 site = pywikibot.getSite() pathWiki = site.family.nicepath(site.lang) site = pywikibot.getSite() if oldTextGiven is None: try: text = page.get() except pywikibot.NoPage: if create: pywikibot.output(u"%s doesn't exist, creating it!" % page.title()) text = u'' else: pywikibot.output(u"%s doesn't exist, skip!" % page.title()) return (False, False, always) except pywikibot.IsRedirectPage: pywikibot.output(u"%s is a redirect, skip!" % page.title()) return (False, False, always) else: text = oldTextGiven # If not up, text put below if not up: newtext = text # Translating the \\n into binary \n addText = addText.replace('\\n', '\n') if (reorderEnabled): # Getting the categories categoriesInside = pywikibot.getCategoryLinks(newtext, site) # Deleting the categories newtext = pywikibot.removeCategoryLinks(newtext, site) # Getting the interwiki interwikiInside = pywikibot.getLanguageLinks(newtext, site) # Removing the interwiki newtext = pywikibot.removeLanguageLinks(newtext, site) # Adding the text newtext += u"\n%s" % addText # Reputting the categories newtext = pywikibot.replaceCategoryLinks(newtext, categoriesInside, site, True) # Dealing the stars' issue allstars = [] starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: newtext = regex.sub('', newtext) allstars += found if allstars != []: newtext = newtext.strip() + '\r\n\r\n' allstars.sort() for element in allstars: newtext += '%s\r\n' % element.strip() # Adding the interwiki newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site) else: newtext += u"\n%s" % addText else: newtext = addText + '\n' + text if putText and text != newtext: pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) #pywikibot.showDiff(text, newtext) # Let's put the changes. while True: # If someone load it as module, maybe it's not so useful to put the # text in the page if putText: if always or choice == 'y': try: pass if always: page.put(newtext, summary, minorEdit=False) else: page.put_async(newtext, summary, minorEdit=False) except pywikibot.EditConflict: pywikibot.output(u'Edit conflict! skip!') return (False, False, always) except pywikibot.ServerError: errorCount += 1 if errorCount < 5: pywikibot.output(u'Server Error! 
Wait..') time.sleep(5) continue else: raise pywikibot.ServerError(u'Fifth Server Error!') except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) return (False, False, always) except pywikibot.PageNotSaved, error: pywikibot.output(u'Error putting page: %s' % error.args) return (False, False, always) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % page.title()) return (False, False, always) else: # Break only if the errors are one after the other... errorCount = 0 return (True, True, always)
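A standalone sketch of the retry pattern used above for transient server errors: wait a few seconds and retry, giving up after a fixed number of consecutive failures. The ServerError class and the save callable are placeholders for the pywikibot exception and page.put call.

import time

class ServerError(Exception):
    pass

def put_with_retries(save, max_retries=5, delay=5):
    # save: a zero-argument callable that performs the edit and may raise
    # ServerError on transient failures (placeholder for page.put(...)).
    errors = 0
    while True:
        try:
            return save()
        except ServerError:
            errors += 1
            if errors >= max_retries:
                raise
            print('Server error, waiting %d seconds before retrying...' % delay)
            time.sleep(delay)

attempts = {'n': 0}
def flaky_save():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ServerError('temporary failure')
    return 'saved'

print(put_with_retries(flaky_save, delay=0))    # 'saved' after two retries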
def add_text( page=None, addText=None, summary=None, regexSkip=None, regexSkipUrl=None, always=False, up=False, putText=True, oldTextGiven=None, create=False, ): if not addText: raise NoEnoughData("You have to specify what text you want to add!") if not summary: summary = wikipedia.translate(wikipedia.getSite(), msg) % addText[:200] # When a page is tagged as "really well written" it has a star in the # interwiki links. This is a list of all the templates used (in regex # format) to make the stars appear. starsList = [ u"bueno", u"cyswllt[ _]erthygl[ _]ddethol", u"dolen[ _]ed", u"destacado", u"destaca[tu]", u"enllaç[ _]ad", u"enllaz[ _]ad", u"leam[ _]vdc", u"legătură[ _]a[bcf]", u"liamm[ _]pub", u"lien[ _]adq", u"lien[ _]ba", u"liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt", u"liên[ _]kết[ _]chọn[ _]lọc", u"ligam[ _]adq", u"ligoelstara", u"ligoleginda", u"link[ _][afgu]a", u"link[ _]adq", u"link[ _]f[lm]", u"link[ _]km", u"link[ _]sm", u"linkfa", u"na[ _]lotura", u"nasc[ _]ar", u"tengill[ _][úg]g", u"ua", u"yüm yg", u"רא", u"وصلة مقالة جيدة", u"وصلة مقالة مختارة", ] errorCount = 0 site = wikipedia.getSite() # /wiki/ is not always the right path in non-wiki projects pathWiki = site.family.nicepath(site.lang) if putText: wikipedia.output(u"Loading %s..." % page.title()) if oldTextGiven == None: try: text = page.get() except wikipedia.NoPage: if create: wikipedia.output(u"%s doesn't exist, creating it!" % page.title()) text = u"" else: wikipedia.output(u"%s doesn't exist, skip!" % page.title()) return (False, False, always) # continue except wikipedia.IsRedirectPage: wikipedia.output(u"%s is a redirect, skip!" % page.title()) return (False, False, always) # continue else: text = oldTextGiven # Understand if the bot has to skip the page or not # In this way you can use both -except and -excepturl if regexSkipUrl != None: url = "%s%s" % (pathWiki, page.urlname()) result = re.findall(regexSkipUrl, site.getUrl(url)) if result != []: wikipedia.output(u"Exception! regex (or word) used with -exceptUrl is in the page. Skip!") return (False, False, always) # continue if regexSkip != None: result = re.findall(regexSkip, text) if result != []: wikipedia.output(u"Exception! regex (or word) used with -except is in the page. 
Skip!") return (False, False, always) # continue # If not up, text put below if not up: newtext = text # Getting the categories categoriesInside = wikipedia.getCategoryLinks(newtext, site) # Deleting the categories newtext = wikipedia.removeCategoryLinks(newtext, site) # Getting the interwiki interwikiInside = wikipedia.getLanguageLinks(newtext, site) # Removing the interwiki newtext = wikipedia.removeLanguageLinks(newtext, site) # nn got a message between the categories and the iw's and they want to keep it there, first remove it if site.language() == u"nn": newtext = newtext.replace(nn_iw_msg, "") # Translating the \\n into binary \n addText = addText.replace("\\n", "\n") # Adding the text newtext += u"\n%s" % addText # Reputting the categories newtext = wikipedia.replaceCategoryLinks(newtext, categoriesInside, site, True) # Put the nn iw message back if site.language() == u"nn": newtext = newtext + u"\n" + nn_iw_msg # Dealing the stars' issue allstars = [] starstext = wikipedia.removeDisabledParts(text) for star in starsList: regex = re.compile("(\{\{(?:template:|)%s\|.*?\}\}[\s]*)" % star, re.I) found = regex.findall(starstext) if found != []: newtext = regex.sub("", newtext) allstars += found if allstars != []: newtext = newtext.strip() + "\r\n\r\n" allstars.sort() for element in allstars: newtext += "%s\r\n" % element.strip() # Adding the interwiki newtext = wikipedia.replaceLanguageLinks(newtext, interwikiInside, site) # If instead the text must be added above... else: newtext = addText + "\n" + text if putText and text != newtext: wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) wikipedia.showDiff(text, newtext) # Let's put the changes. while True: # If someone load it as module, maybe it's not so useful to put the # text in the page if putText: if not always: choice = wikipedia.inputChoice( u"Do you want to accept these changes?", ["Yes", "No", "All"], ["y", "N", "a"], "N" ) if choice == "a": always = True elif choice == "n": return (False, False, always) if always or choice == "y": try: if always: page.put(newtext, summary) else: page.put_async(newtext, summary) except wikipedia.EditConflict: wikipedia.output(u"Edit conflict! skip!") return (False, False, always) except wikipedia.ServerError: errorCount += 1 if errorCount < 5: wikipedia.output(u"Server Error! Wait..") time.sleep(5) continue else: raise wikipedia.ServerError(u"Fifth Server Error!") except wikipedia.SpamfilterError, e: wikipedia.output(u"Cannot change %s because of blacklist entry %s" % (page.title(), e.url)) return (False, False, always) except wikipedia.PageNotSaved, error: wikipedia.output(u"Error putting page: %s" % error.args) return (False, False, always) except wikipedia.LockedPage: wikipedia.output(u"Skipping %s (locked page)" % page.title()) return (False, False, always) else: # Break only if the errors are one after the other... errorCount = 0 return (True, True, always)
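A standalone illustration of the "add below the text but above the categories" idea: pull the trailing [[Category:...]] links out, append the new text to the body, then put the categories back. It uses a plain regex instead of the wikipedia.* helpers and assumes English category prefixes.

import re

categoryR = re.compile(r'\[\[Category:[^\]]+\]\]\s*', re.IGNORECASE)

def add_text_below(text, addition):
    categories = categoryR.findall(text)       # remember the category links
    body = categoryR.sub('', text).rstrip()    # and strip them from the body
    body += u'\n%s' % addition                 # the new text goes below the body
    if categories:
        body += u'\n' + u''.join(c.strip() + u'\n' for c in categories)
    return body

sample = u"Some article text.\n[[Category:Examples]]\n[[Category:Tests]]\n"
print(add_text_below(sample, u'{{Some maintenance template}}'))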
def standardizePageFooter(self, text): """ Makes sure that interwiki links, categories and star templates are put to the correct position and into the right order. This combines the old instances standardizeInterwiki and standardizeCategories The page footer has the following section in that sequence: 1. categories 2. ## TODO: template beyond categories ## 3. additional information depending on local site policy 4. stars templates for featured and good articles 5. interwiki links """ starsList = [ u'bueno', u'bom interwiki', u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed', u'destacado', u'destaca[tu]', u'enllaç[ _]ad', u'enllaz[ _]ad', u'leam[ _]vdc', u'legătură[ _]a[bcf]', u'liamm[ _]pub', u'lien[ _]adq', u'lien[ _]ba', u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt', u'liên[ _]kết[ _]chọn[ _]lọc', u'ligam[ _]adq', u'ligazón[ _]a[bd]', u'ligoelstara', u'ligoleginda', u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km', u'link[ _]sm', u'linkfa', u'na[ _]lotura', u'nasc[ _]ar', u'tengill[ _][úg]g', u'ua', u'yüm yg', u'רא', u'وصلة مقالة جيدة', u'وصلة مقالة مختارة', ] categories = None interwikiLinks = None allstars = [] # The PyWikipediaBot is no longer allowed to touch categories on the # German Wikipedia. See # http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22 # ignoring nn-wiki of cause of the comment line above iw section if not self.template and '{{Personendaten' not in text and \ '{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \ self.site.lang not in ('et', 'it', 'bg', 'ru'): try: categories = pywikibot.getCategoryLinks(text, site=self.site) # there are categories like [[categoy:Foo {{#time:Y...}}]] except pywikibot.InvalidTitle: pass if not self.talkpage: # and pywikibot.calledModuleName() <> 'interwiki': subpage = False if self.template: loc = None try: tmpl, loc = moved_links[self.site.lang] del tmpl except KeyError: pass if loc is not None and loc in self.title: subpage = True interwikiLinks = pywikibot.getLanguageLinks( text, insite=self.site, template_subpage=subpage) # Removing the interwiki text = pywikibot.removeLanguageLinks(text, site=self.site) # Removing the stars' issue starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: text = regex.sub('', text) allstars += found # Adding categories if categories: ##Sorting categories in alphabetic order. beta test only on Persian Wikipedia, TODO fix bug for sorting #if self.site.language() == 'fa': # categories.sort() ##Taking main cats to top # for name in categories: # if re.search(u"(.+?)\|(.{,1}?)",name.title()) or name.title()==name.title().split(":")[0]+title: # categories.remove(name) # categories.insert(0, name) text = pywikibot.replaceCategoryLinks(text, categories, site=self.site) # Adding stars templates if allstars: text = text.strip() + self.site.family.interwiki_text_separator allstars.sort() for element in allstars: text += '%s\r\n' % element.strip() if pywikibot.verbose: pywikibot.output(u'%s' % element.strip()) # Adding the interwiki if interwikiLinks: text = pywikibot.replaceLanguageLinks(text, interwikiLinks, site=self.site, template=self.template, template_subpage=subpage) return text
def add_text( page=None, addText=None, summary=None, regexSkip=None, regexSkipUrl=None, always=False, up=False, putText=True, oldTextGiven=None, reorderEnabled=True, create=False, ): if not addText: raise NoEnoughData("You have to specify what text you want to add!") if not summary: summary = i18n.twtranslate(pywikibot.getSite(), "add_text-adding", {"adding": addText[:200]}) # When a page is tagged as "really well written" it has a star in the # interwiki links. This is a list of all the templates used (in regex # format) to make the stars appear. errorCount = 0 site = pywikibot.getSite() pathWiki = site.family.nicepath(site.lang) if putText: pywikibot.output(u"Loading %s..." % page.title()) if oldTextGiven is None: try: text = page.get() except pywikibot.NoPage: if create: pywikibot.output(u"%s doesn't exist, creating it!" % page.title()) text = u"" else: pywikibot.output(u"%s doesn't exist, skip!" % page.title()) return (False, False, always) except pywikibot.IsRedirectPage: pywikibot.output(u"%s is a redirect, skip!" % page.title()) return (False, False, always) else: text = oldTextGiven # Understand if the bot has to skip the page or not # In this way you can use both -except and -excepturl if regexSkipUrl is not None: url = "%s%s" % (pathWiki, page.urlname()) result = re.findall(regexSkipUrl, site.getUrl(url)) if result != []: pywikibot.output( u"""Exception! regex (or word) used with -exceptUrl is in the page. Skip! Match was: %s""" % result ) return (False, False, always) if regexSkip is not None: result = re.findall(regexSkip, text) if result != []: pywikibot.output( u"""Exception! regex (or word) used with -except is in the page. Skip! Match was: %s""" % result ) return (False, False, always) # If not up, text put below if not up: newtext = text # Translating the \\n into binary \n addText = addText.replace("\\n", config.line_separator) if reorderEnabled: # Getting the categories categoriesInside = pywikibot.getCategoryLinks(newtext, site) # Deleting the categories newtext = pywikibot.removeCategoryLinks(newtext, site) # Getting the interwiki interwikiInside = pywikibot.getLanguageLinks(newtext, site) # Removing the interwiki newtext = pywikibot.removeLanguageLinks(newtext, site) # Adding the text newtext += u"%s%s" % (config.line_separator, addText) # Reputting the categories newtext = pywikibot.replaceCategoryLinks(newtext, categoriesInside, site, True) # Dealing the stars' issue allstars = [] starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile("(\{\{(?:template:|)%s\|.*?\}\}[\s]*)" % star, re.I) found = regex.findall(starstext) if found != []: newtext = regex.sub("", newtext) allstars += found if allstars != []: newtext = newtext.strip() + config.line_separator * 2 allstars.sort() for element in allstars: newtext += "%s%s" % (element.strip(), config.LS) # Adding the interwiki newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site) else: newtext += u"%s%s" % (config.line_separator, addText) else: newtext = addText + config.line_separator + text if putText and text != newtext: pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(text, newtext) # Let's put the changes. 
while True: # If someone load it as module, maybe it's not so useful to put the # text in the page if putText: if not always: choice = pywikibot.inputChoice( u"Do you want to accept these changes?", ["Yes", "No", "All", "open in Browser"], ["y", "n", "a", "b"], "n", ) if choice == "a": always = True elif choice == "n": return (False, False, always) elif choice == "b": webbrowser.open("http://%s%s" % (page.site.hostname(), page.site.nice_get_address(page.title()))) pywikibot.input("Press Enter when finished in browser.") if always or choice == "y": try: if always: page.put(newtext, summary, minorEdit=page.namespace() != 3) else: page.put_async(newtext, summary, minorEdit=page.namespace() != 3) except pywikibot.EditConflict: pywikibot.output(u"Edit conflict! skip!") return (False, False, always) except pywikibot.ServerError: errorCount += 1 if errorCount < config.maxretries: pywikibot.output(u"Server Error! Wait..") time.sleep(5) continue else: raise pywikibot.ServerError(u"Fifth Server Error!") except pywikibot.SpamfilterError as e: pywikibot.output(u"Cannot change %s because of blacklist entry %s" % (page.title(), e.url)) return (False, False, always) except pywikibot.PageNotSaved as error: pywikibot.output(u"Error putting page: %s" % error.args) return (False, False, always) except pywikibot.LockedPage: pywikibot.output(u"Skipping %s (locked page)" % page.title()) return (False, False, always) else: # Break only if the errors are one after the other... errorCount = 0 return (True, True, always) else: return (text, newtext, always)
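A standalone sketch of the confirmation flow above (yes / no / all / open in browser), using plain console input and the stdlib webbrowser module; the prompt text and URL handling are illustrative and there is no real page object here.

import webbrowser

try:
    read_choice = raw_input          # Python 2
except NameError:
    read_choice = input              # Python 3

def confirm_edit(page_url, always=False):
    while not always:
        choice = read_choice('Accept these changes? [y]es/[n]o/[a]ll/[b]rowser: ').lower()
        if choice == 'a':
            always = True            # accept this and all following edits
        elif choice == 'y':
            return True, always
        elif choice == 'n':
            return False, always
        elif choice == 'b':
            webbrowser.open(page_url)
            read_choice('Press Enter when finished in browser.')
    return True, always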
def add_text(page=None, addText=None, summary=None, regexSkip=None, regexSkipUrl=None, always=False, up=False, putText=True, oldTextGiven=None, reorderEnabled=True, create=False): if not addText: raise NoEnoughData('You have to specify what text you want to add!') if not summary: summary = i18n.twtranslate(pywikibot.getSite(), 'add_text-adding', {'adding': addText[:200]}) # When a page is tagged as "really well written" it has a star in the # interwiki links. This is a list of all the templates used (in regex # format) to make the stars appear. errorCount = 0 site = pywikibot.getSite() pathWiki = site.family.nicepath(site.lang) if putText: pywikibot.output(u'Loading %s...' % page.title()) if oldTextGiven is None: try: text = page.get() except pywikibot.NoPage: if create: pywikibot.output(u"%s doesn't exist, creating it!" % page.title()) text = u'' else: pywikibot.output(u"%s doesn't exist, skip!" % page.title()) return (False, False, always) except pywikibot.IsRedirectPage: pywikibot.output(u"%s is a redirect, skip!" % page.title()) return (False, False, always) else: text = oldTextGiven # Understand if the bot has to skip the page or not # In this way you can use both -except and -excepturl if regexSkipUrl is not None: url = '%s%s' % (pathWiki, page.urlname()) result = re.findall(regexSkipUrl, site.getUrl(url)) if result != []: pywikibot.output( u'''Exception! regex (or word) used with -exceptUrl is in the page. Skip! Match was: %s''' % result) return (False, False, always) if regexSkip is not None: result = re.findall(regexSkip, text) if result != []: pywikibot.output( u'''Exception! regex (or word) used with -except is in the page. Skip! Match was: %s''' % result) return (False, False, always) # If not up, text put below if not up: newtext = text # Translating the \\n into binary \n addText = addText.replace('\\n', config.line_separator) if (reorderEnabled): # Getting the categories categoriesInside = pywikibot.getCategoryLinks(newtext, site) # Deleting the categories newtext = pywikibot.removeCategoryLinks(newtext, site) # Getting the interwiki interwikiInside = pywikibot.getLanguageLinks(newtext, site) # Removing the interwiki newtext = pywikibot.removeLanguageLinks(newtext, site) # Adding the text newtext += u"%s%s" % (config.line_separator, addText) # Reputting the categories newtext = pywikibot.replaceCategoryLinks(newtext, categoriesInside, site, True) # Dealing the stars' issue allstars = [] starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: newtext = regex.sub('', newtext) allstars += found if allstars != []: newtext = newtext.strip() + config.line_separator * 2 allstars.sort() for element in allstars: newtext += '%s%s' % (element.strip(), config.LS) # Adding the interwiki newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site) else: newtext += u"%s%s" % (config.line_separator, addText) else: newtext = addText + config.line_separator + text if putText and text != newtext: pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(text, newtext) # Let's put the changes. 
while True: # If someone load it as module, maybe it's not so useful to put the # text in the page if putText: if not always: choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No', 'All', 'open in Browser'], ['y', 'n', 'a', 'b'], 'n') if choice == 'a': always = True elif choice == 'n': return (False, False, always) elif choice == 'b': webbrowser.open("http://%s%s" % ( page.site.hostname(), page.site.nice_get_address(page.title()) )) pywikibot.input("Press Enter when finished in browser.") if always or choice == 'y': try: if always: page.put(newtext, summary, minorEdit=page.namespace() != 3) else: page.put_async(newtext, summary, minorEdit=page.namespace() != 3) except pywikibot.EditConflict: pywikibot.output(u'Edit conflict! skip!') return (False, False, always) except pywikibot.ServerError: errorCount += 1 if errorCount < config.maxretries: pywikibot.output(u'Server Error! Wait..') time.sleep(5) continue else: raise pywikibot.ServerError(u'Fifth Server Error!') except pywikibot.SpamfilterError as e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) return (False, False, always) except pywikibot.PageNotSaved as error: pywikibot.output(u'Error putting page: %s' % error.args) return (False, False, always) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % page.title()) return (False, False, always) else: # Break only if the errors are one after the other... errorCount = 0 return (True, True, always) else: return (text, newtext, always)
def add_text(page = None, addText = None, summary = None, regexSkip = None, regexSkipUrl = None, always = False, up = False, putText = True, oldTextGiven = None, reorderEnabled = True, create=False): if not addText: raise NoEnoughData('You have to specify what text you want to add!') if not summary: summary = i18n.twtranslate(pywikibot.getSite(), 'add_text-adding', {'adding': addText[:200]}) # When a page is tagged as "really well written" it has a star in the # interwiki links. This is a list of all the templates used (in regex # format) to make the stars appear. starsList = [ u'bueno', u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed', u'destacado', u'destaca[tu]', u'enllaç[ _]ad', u'enllaz[ _]ad', u'leam[ _]vdc', u'legătură[ _]a[bcf]', u'liamm[ _]pub', u'lien[ _]adq', u'lien[ _]ba', u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt', u'liên[ _]kết[ _]chọn[ _]lọc', u'ligam[ _]adq', u'ligoelstara', u'ligoleginda', u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km', u'link[ _]sm', u'linkfa', u'na[ _]lotura', u'nasc[ _]ar', u'tengill[ _][úg]g', u'ua', u'yüm yg', u'רא', u'وصلة مقالة جيدة', u'وصلة مقالة مختارة', ] errorCount = 0 site = pywikibot.getSite() # /wiki/ is not always the right path in non-wiki projects pathWiki = site.family.nicepath(site.lang) if putText: pywikibot.output(u'Loading %s...' % page.title()) if oldTextGiven == None: try: text = page.get() except pywikibot.NoPage: if create: pywikibot.output(u"%s doesn't exist, creating it!" % page.title()) text = u'' else: pywikibot.output(u"%s doesn't exist, skip!" % page.title()) return (False, False, always) # continue except pywikibot.IsRedirectPage: pywikibot.output(u"%s is a redirect, skip!" % page.title()) return (False, False, always) # continue else: text = oldTextGiven # Understand if the bot has to skip the page or not # In this way you can use both -except and -excepturl if regexSkipUrl != None: url = '%s%s' % (pathWiki, page.urlname()) result = re.findall(regexSkipUrl, site.getUrl(url)) if result != []: pywikibot.output( u'''Exception! regex (or word) used with -exceptUrl is in the page. Skip! Match was: %s''' % result) return (False, False, always) # continue if regexSkip != None: result = re.findall(regexSkip, text) if result != []: pywikibot.output( u'''Exception! regex (or word) used with -except is in the page. Skip! 
Match was: %s''' % result) return (False, False, always) # continue # If not up, text put below if not up: newtext = text # Translating the \\n into binary \n addText = addText.replace('\\n', '\n') if (reorderEnabled): # Getting the categories categoriesInside = pywikibot.getCategoryLinks(newtext, site) # Deleting the categories #newtext = pywikibot.removeCategoryLinks(newtext, site) # Getting the interwiki interwikiInside = pywikibot.getLanguageLinks(newtext, site) # Removing the interwiki newtext = pywikibot.removeLanguageLinks(newtext, site) # nn got a message between the categories and the iw's # and they want to keep it there, first remove it hasCommentLine = False if (site.language()==u'nn'): regex = re.compile('(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages alphabetically by name\) ?-->)') found = regex.findall(newtext) if found: hasCommentLine = True newtext = regex.sub('', newtext) # Adding the text newtext += u"\n%s" % addText # Reputting the categories #newtext = pywikibot.replaceCategoryLinks(newtext, #categoriesInside, site, True) #Put the nn iw message back if site.language()==u'nn' and (interwikiInside or hasCommentLine): newtext = newtext + u'\r\n\r\n' + nn_iw_msg # Dealing the stars' issue allstars = [] starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: newtext = regex.sub('', newtext) allstars += found if allstars != []: newtext = newtext.strip()+'\r\n\r\n' allstars.sort() for element in allstars: newtext += '%s\r\n' % element.strip() # Adding the interwiki newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site) else: # Adding the text newtext += u"\n%s" % addText # If instead the text must be added above... else: newtext = addText + '\n' + text if putText and text != newtext: pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(text, newtext) # Let's put the changes. while True: # If someone load it as module, maybe it's not so useful to put the # text in the page if putText: if not always: choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N') if choice == 'a': always = True elif choice == 'n': return (False, False, always) if always or choice == 'y': try: if always: page.put(newtext, summary) else: page.put_async(newtext, summary) except pywikibot.EditConflict: pywikibot.output(u'Edit conflict! skip!') return (False, False, always) except pywikibot.ServerError: errorCount += 1 if errorCount < 5: pywikibot.output(u'Server Error! Wait..') time.sleep(5) continue else: raise pywikibot.ServerError(u'Fifth Server Error!') except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) return (False, False, always) except pywikibot.PageNotSaved, error: pywikibot.output(u'Error putting page: %s' % error.args) return (False, False, always) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % page.title()) return (False, False, always) else: # Break only if the errors are one after the other... errorCount = 0 return (True, True, always)
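The nn-wiki special case above preserves a positional HTML comment between the categories and the interwiki links. A standalone sketch of detecting, removing and re-appending that comment with the same regex; the nn_iw_msg text is assumed here for illustration.

import re

nn_iw_msg = (u'<!-- interwiki (no/nb, sv, da first; then other languages '
             u'alphabetically by name) -->')
commentR = re.compile(
    r'(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages '
    r'alphabetically by name\) ?-->)')

text = u'Body.\n%s\n[[sv:Exempel]]\n' % nn_iw_msg
has_comment = bool(commentR.search(text))
text = commentR.sub('', text)
# ... footer would be rebuilt here ...
if has_comment:
    text = text + u'\r\n\r\n' + nn_iw_msg
print(text)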
def run(self): """ Runs the Bot """ try: deadLinks = codecs.open(listof404pages, 'r', 'latin_1').read() except IOError: pywikibot.output( 'You need to download ' 'http://www.twoevils.org/files/wikipedia/404-links.txt.gz ' 'and to ungzip it in the same directory') raise socket.setdefaulttimeout(30) editedpages = 0 for page in self.generator: try: # Load the page's text from the wiki new_text = page.get() if not page.canBeEdited(): pywikibot.output(u"You can't edit page %s" % page.title(asLink=True)) continue except pywikibot.NoPage: pywikibot.output(u'Page %s not found' % page.title(asLink=True)) continue except pywikibot.IsRedirectPage: pywikibot.output(u'Page %s is a redirect' % page.title(asLink=True)) continue # for each link to change for match in linksInRef.finditer( pywikibot.removeDisabledParts(page.get())): link = match.group(u'url') #debugging purpose #print link if u'jstor.org' in link: #TODO: Clean URL blacklist continue ref = RefLink(link, match.group('name')) f = None try: socket.setdefaulttimeout(20) try: f = urllib2.urlopen(ref.url.decode("utf8")) except UnicodeError: ref.url = urllib2.quote(ref.url.encode("utf8"), "://") f = urllib2.urlopen(ref.url) #Try to get Content-Type from server headers = f.info() contentType = headers.getheader('Content-Type') if contentType and not self.MIME.search(contentType): if ref.link.lower().endswith('.pdf') and \ not self.ignorepdf: # If file has a PDF suffix self.getPDFTitle(ref, f) else: pywikibot.output( u'\03{lightyellow}WARNING\03{default} : ' u'media : %s ' % ref.link) if ref.title: if not re.match( u'(?i) *microsoft (word|excel|visio)', ref.title): ref.transform(ispdf=True) repl = ref.refTitle() else: pywikibot.output( u'\03{lightyellow}WARNING\03{default} : ' u'PDF title blacklisted : %s ' % ref.title) repl = ref.refLink() else: repl = ref.refLink() new_text = new_text.replace(match.group(), repl) continue # Get the real url where we end (http redirects !) redir = f.geturl() if redir != ref.link and \ domain.findall(redir) == domain.findall(link): if soft404.search(redir) and \ not soft404.search(ref.link): pywikibot.output( u'\03{lightyellow}WARNING\03{default} : ' u'Redirect 404 : %s ' % ref.link) continue if dirIndex.match(redir) and \ not dirIndex.match(ref.link): pywikibot.output( u'\03{lightyellow}WARNING\03{default} : ' u'Redirect to root : %s ' % ref.link) continue # uncompress if necessary if headers.get('Content-Encoding') in ('gzip', 'x-gzip'): # XXX: small issue here: the whole page is downloaded # through f.read(). It might fetch big files/pages. # However, truncating an encoded gzipped stream is not # an option, for unzipping will fail. 
compressed = StringIO.StringIO(f.read()) f = gzip.GzipFile(fileobj=compressed) # Read the first 1,000,000 bytes (0.95 MB) linkedpagetext = f.read(1000000) socket.setdefaulttimeout(None) except UnicodeError: # example : http://www.adminet.com/jo/20010615¦/ECOC0100037D.html # in [[fr:Cyanure]] pywikibot.output( u'\03{lightred}Bad link\03{default} : %s in %s' % (ref.url, page.title(asLink=True))) continue except urllib2.HTTPError, e: pywikibot.output( u'HTTP error (%s) for %s on %s' % (e.code, ref.url, page.title(asLink=True)), toStdout=True) # 410 Gone, indicates that the resource has been purposely # removed if e.code == 410 or \ (e.code == 404 and (u'\t%s\t' % ref.url in deadLinks)): repl = ref.refDead() new_text = new_text.replace(match.group(), repl) continue except (urllib2.URLError, socket.error, IOError, httplib.error), e: pywikibot.output(u'Can\'t retrieve page %s : %s' % (ref.url, e)) continue except ValueError: # Known bug of httplib, google for : # "httplib raises ValueError reading chunked content" continue
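A standalone sketch of the redirect heuristics used above: a redirect that stays on the same site but lands on a 404-looking URL or on the site root is treated as an uninformative target. The regexes are illustrative stand-ins for soft404, dirIndex and domain.

import re

soft404 = re.compile(r'\D404(\D|\Z)|error|errdoc|Not_Found|sitedown|eventlog',
                     re.IGNORECASE)
dirIndex = re.compile(r'^\w+://[^/]+/((default|index)\.'
                      r'(asp|aspx|cgi|htm|html|phtml|mpx|mspx|php|shtml|var))?$',
                      re.IGNORECASE)
domain = re.compile(r'^(\w+)://(?:www.|)([^/]+)')

def redirect_looks_dead(original, redirected):
    if redirected == original:
        return False
    if domain.findall(redirected) != domain.findall(original):
        return False                       # left the site: judge separately
    if soft404.search(redirected) and not soft404.search(original):
        return True                        # redirected onto a 404-style page
    if dirIndex.match(redirected) and not dirIndex.match(original):
        return True                        # redirected back to the site root
    return False

print(redirect_looks_dead('http://example.org/story/123',
                          'http://example.org/404.html'))       # True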
def standardizePageFooter(self, text): """ Makes sure that interwiki links, categories and star templates are put to the correct position and into the right order. This combines the old instances standardizeInterwiki and standardizeCategories The page footer has the following section in that sequence: 1. categories 2. additional information depending on local site policy 3. stars templates for featured and good articles 4. interwiki links """ starsList = [ u"bueno", u"cyswllt[ _]erthygl[ _]ddethol", u"dolen[ _]ed", u"destacado", u"destaca[tu]", u"enllaç[ _]ad", u"enllaz[ _]ad", u"leam[ _]vdc", u"legătură[ _]a[bcf]", u"liamm[ _]pub", u"lien[ _]adq", u"lien[ _]ba", u"liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt", u"liên[ _]kết[ _]chọn[ _]lọc", u"ligam[ _]adq", u"ligoelstara", u"ligoleginda", u"link[ _][afgu]a", u"link[ _]adq", u"link[ _]f[lm]", u"link[ _]km", u"link[ _]sm", u"linkfa", u"na[ _]lotura", u"nasc[ _]ar", u"tengill[ _][úg]g", u"ua", u"yüm yg", u"רא", u"وصلة مقالة جيدة", u"وصلة مقالة مختارة", ] categories = None interwikiLinks = None allstars = [] hasCommentLine = False # The PyWikipediaBot is no longer allowed to touch categories on the German Wikipedia. # See http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/bis_2006#Position_der_Personendaten_am_.22Artikelende.22 # ignoring nn-wiki of cause of the comment line above iw section if not self.template and not "{{Personendaten" in text: categories = pywikibot.getCategoryLinks(text, site=self.site) if not self.talkpage: # and pywikibot.calledModuleName() <> 'interwiki': subpage = False if self.template: loc = None try: tmpl, loc = moved_links[self.site.lang] del tmpl except KeyError: pass if loc != None and loc in self.title: subpage = True interwikiLinks = pywikibot.getLanguageLinks(text, insite=self.site, template_subpage=subpage) # Removing the interwiki text = pywikibot.removeLanguageLinks(text, site=self.site) # Removing the stars' issue starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile("(\{\{(?:template:|)%s\|.*?\}\}[\s]*)" % star, re.I) found = regex.findall(starstext) if found != []: if pywikibot.verbose: print found text = regex.sub("", text) allstars += found # nn got a message between the categories and the iw's # and they want to keep it there, first remove it if self.site.language() == "nn": regex = re.compile( "(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages alphabetically by name\) ?-->)" ) found = regex.findall(text) if found: if pywikibot.verbose: print found hasCommentLine = True text = regex.sub("", text) # Adding categories if categories: text = pywikibot.replaceCategoryLinks(text, categories, site=self.site) # Put the nn iw message back if self.site.language() == "nn" and not self.talkpage and (interwikiLinks or hasCommentLine): text = text + "\r\n\r\n" + nn_iw_msg # Adding stars templates if allstars: text = text.strip() + self.site.family.interwiki_text_separator allstars.sort() for element in allstars: text += "%s\r\n" % element.strip() if pywikibot.verbose: pywikibot.output(u"%s" % element.strip()) # Adding the interwiki if interwikiLinks: text = pywikibot.replaceLanguageLinks( text, interwikiLinks, site=self.site, template=self.template, template_subpage=subpage ) return text
def add_text(page = None, addText = None, summary = None, regexSkip = None, regexSkipUrl = None, always = False, up = False, putText = True, oldTextGiven = None, reorderEnabled = True, create=False): if not addText: raise NoEnoughData('You have to specify what text you want to add!') if not summary: summary = i18n.twtranslate(pywikibot.getSite(), 'add_text-adding', {'adding': addText[:200]}) # When a page is tagged as "really well written" it has a star in the # interwiki links. This is a list of all the templates used (in regex # format) to make the stars appear. starsList = [ u'bueno', u'bom interwiki', u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed', u'destacado', u'destaca[tu]', u'enllaç[ _]ad', u'enllaz[ _]ad', u'leam[ _]vdc', u'legătură[ _]a[bcf]', u'liamm[ _]pub', u'lien[ _]adq', u'lien[ _]ba', u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt', u'liên[ _]kết[ _]chọn[ _]lọc', u'ligam[ _]adq', u'ligoelstara', u'ligoleginda', u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km', u'link[ _]sm', u'linkfa', u'na[ _]lotura', u'nasc[ _]ar', u'tengill[ _][úg]g', u'ua', u'yüm yg', u'רא', u'وصلة مقالة جيدة', u'وصلة مقالة مختارة', ] errorCount = 0 site = pywikibot.getSite() # /wiki/ is not always the right path in non-wiki projects pathWiki = site.family.nicepath(site.lang) if putText: pywikibot.output(u'Loading %s...' % page.title()) if oldTextGiven == None: try: text = page.get() except pywikibot.NoPage: if create: pywikibot.output(u"%s doesn't exist, creating it!" % page.title()) text = u'' else: pywikibot.output(u"%s doesn't exist, skip!" % page.title()) return (False, False, always) # continue except pywikibot.IsRedirectPage: pywikibot.output(u"%s is a redirect, skip!" % page.title()) return (False, False, always) # continue else: text = oldTextGiven # Understand if the bot has to skip the page or not # In this way you can use both -except and -excepturl if regexSkipUrl != None: url = '%s%s' % (pathWiki, page.urlname()) result = re.findall(regexSkipUrl, site.getUrl(url)) if result != []: pywikibot.output( u'''Exception! regex (or word) used with -exceptUrl is in the page. Skipping! Match was: %s''' % result) return (False, False, always) # continue if regexSkip != None: result = re.findall(regexSkip, text) if result != []: pywikibot.output( u'''Exception! regex (or word) used with -except is in the page. Skipping! 
Match was: %s''' % result) return (False, False, always) # continue # If not up, text put below if not up: newtext = text # Translating the \\n into binary \n addText = addText.replace('\\n', '\n') if (reorderEnabled): # Getting the categories categoriesInside = pywikibot.getCategoryLinks(newtext, site) # Deleting the categories newtext = pywikibot.removeCategoryLinks(newtext, site) # Getting the interwiki interwikiInside = pywikibot.getLanguageLinks(newtext, site) # Removing the interwiki newtext = pywikibot.removeLanguageLinks(newtext, site) # nn got a message between the categories and the iw's # and they want to keep it there, first remove it hasCommentLine = False if (site.language()==u'nn'): regex = re.compile('(<!-- ?interwiki \(no(?:/nb)?, ?sv, ?da first; then other languages alphabetically by name\) ?-->)') found = regex.findall(newtext) if found: hasCommentLine = True newtext = regex.sub('', newtext) # Adding the text newtext += u"\n%s" % addText # Reputting the categories newtext = pywikibot.replaceCategoryLinks(newtext, categoriesInside, site, True) #Put the nn iw message back if site.language()==u'nn' and (interwikiInside or hasCommentLine): newtext = newtext + u'\r\n\r\n' + nn_iw_msg # Dealing the stars' issue allstars = [] starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: newtext = regex.sub('', newtext) allstars += found if allstars != []: newtext = newtext.strip()+'\r\n\r\n' allstars.sort() for element in allstars: newtext += '%s\r\n' % element.strip() # Adding the interwiki newtext = pywikibot.replaceLanguageLinks(newtext, interwikiInside, site) else: # Adding the text newtext += u"\n%s" % addText # If instead the text must be added above... else: newtext = addText + '\n' + text if putText and text != newtext: pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) pywikibot.showDiff(text, newtext) # Let's put the changes. while True: # If someone load it as module, maybe it's not so useful to put the # text in the page if putText: if not always: choice = pywikibot.inputChoice( u'Do you want to accept these changes?', ['Yes', 'No', 'All', 'open in Browser'], ['y', 'N', 'a', 'b'], 'N') if choice == 'a': always = True elif choice == 'n': return (False, False, always) elif choice == 'b': webbrowser.open("http://%s%s" % ( page.site().hostname(), page.site().nice_get_address(page.title()) )) pywikibot.input("Press Enter when finished in browser.") if always or choice == 'y': try: if always: page.put(newtext, summary) else: page.put_async(newtext, summary) except pywikibot.EditConflict: pywikibot.output(u'Edit conflict! skip!') return (False, False, always) except pywikibot.ServerError: errorCount += 1 if errorCount < 5: pywikibot.output(u'Server Error! Wait..') time.sleep(5) continue else: raise pywikibot.ServerError(u'Fifth Server Error!') except pywikibot.SpamfilterError, e: pywikibot.output( u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) return (False, False, always) except pywikibot.PageNotSaved, error: pywikibot.output(u'Error putting page: %s' % error.args) return (False, False, always) except pywikibot.LockedPage: pywikibot.output(u'Skipping %s (locked page)' % page.title()) return (False, False, always) else: # Break only if the errors are one after the other... errorCount = 0 return (True, True, always)
def standardizePageFooter(self, text): """ Makes sure that interwiki links, categories and star templates are put to the correct position and into the right order. This combines the old instances standardizeInterwiki and standardizeCategories The page footer has the following section in that sequence: 1. categories 2. additional information depending on local site policy 3. stars templates for featured and good articles 4. interwiki links """ starsList = [ u'bueno', u'bom interwiki', u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed', u'destacado', u'destaca[tu]', u'enllaç[ _]ad', u'enllaz[ _]ad', u'leam[ _]vdc', u'legătură[ _]a[bcf]', u'liamm[ _]pub', u'lien[ _]adq', u'lien[ _]ba', u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt', u'liên[ _]kết[ _]chọn[ _]lọc', u'ligam[ _]adq', u'ligoelstara', u'ligoleginda', u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km', u'link[ _]sm', u'linkfa', u'na[ _]lotura', u'nasc[ _]ar', u'tengill[ _][úg]g', u'ua', u'yüm yg', u'רא', u'وصلة مقالة جيدة', u'وصلة مقالة مختارة', ] categories = None interwikiLinks = None allstars = [] hasCommentLine = False # The PyWikipediaBot is no longer allowed to touch categories on the # German Wikipedia. See # http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22 # ignoring nn-wiki of cause of the comment line above iw section if not self.template and not '{{Personendaten' in text: categories = pywikibot.getCategoryLinks(text, site = self.site) if not self.talkpage:# and pywikibot.calledModuleName() <> 'interwiki': subpage = False if self.template: loc = None try: tmpl, loc = moved_links[self.site.lang] del tmpl except KeyError: pass if loc != None and loc in self.title: subpage = True interwikiLinks = pywikibot.getLanguageLinks( text, insite=self.site, template_subpage=subpage) # Removing the interwiki text = pywikibot.removeLanguageLinks(text, site = self.site) # Removing the stars' issue starstext = pywikibot.removeDisabledParts(text) for star in starsList: regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)' % star, re.I) found = regex.findall(starstext) if found != []: if pywikibot.verbose: print found text = regex.sub('', text) allstars += found # nn got a message between the categories and the iw's # and they want to keep it there, first remove it if self.site.lang in msg_interwiki: iw_msg = msg_interwiki[self.site.lang] if isinstance(iw_msg, tuple): iw_reg = iw_msg[1] iw_msg = iw_msg[0] else: iw_reg = u'(%s)' % iw_msg regex = re.compile(iw_reg) found = regex.findall(text) if found: if pywikibot.verbose: print found hasCommentLine = True text = regex.sub('', text) # Adding categories if categories: text = pywikibot.replaceCategoryLinks(text, categories, site=self.site) # Put the iw message back if not self.talkpage and \ ((interwikiLinks or hasCommentLine) and self.site.language() == 'nn' or (interwikiLinks and hasCommentLine) and self.site.language() == 'fr'): text = text + '\r\n\r\n' + iw_msg # Adding stars templates if allstars: text = text.strip()+self.site.family.interwiki_text_separator allstars.sort() for element in allstars: text += '%s\r\n' % element.strip() if pywikibot.verbose: pywikibot.output(u'%s' %element.strip()) # Adding the interwiki if interwikiLinks: text = pywikibot.replaceLanguageLinks(text, interwikiLinks, site=self.site, template=self.template, template_subpage=subpage) return text
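The msg_interwiki lookup above accepts either a plain message string or a (message, regex) pair per language. A standalone sketch of that normalisation; the table contents are invented for illustration and the plain-string case is escaped before compiling.

import re

msg_interwiki = {
    'nn': u'<!-- interwiki placement comment -->',
    'fr': (u'<!-- autres langues -->', u'(<!-- ?autres langues ?-->)'),
}

def interwiki_message(lang):
    # Returns (message, compiled regex), or (None, None) if the wiki has none.
    if lang not in msg_interwiki:
        return None, None
    iw_msg = msg_interwiki[lang]
    if isinstance(iw_msg, tuple):
        iw_msg, iw_reg = iw_msg[0], iw_msg[1]
    else:
        iw_reg = u'(%s)' % re.escape(iw_msg)
    return iw_msg, re.compile(iw_reg)

print(interwiki_message('fr')[0])    # <!-- autres langues -->
print(interwiki_message('de'))       # (None, None)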