Example #1
    def delete_page(self, wiki, page_list, delete_image=False):
        if not self._check_login_status(wiki):
            self.__log.critical('delete_page: wiki not logged in.')
            return False

        if not isinstance(page_list, list):
            self.__log.critical('delete_page: {0} is not a list.'.format(page_list))
            return False

        result = []

        for page_title in page_list:
            page = Page(wiki, title=page_title)
            if not page.exists:
                result.append(False)
                self.__log.debug('delete_page: Page {0} does not exist.'.format(page_title))
                continue

            wiki_text = page.getWikiText()

            if delete_image:
                image_list = self._parse_image_title(wiki_text)
                result.append(self.delete_image(wiki, image_list))

            try:
                result.append(page.delete())
                self.__log.debug('delete_page: Page {0} deleted'.format(page_title))
            except Exception:
                self.__log.debug('delete_page: Something went wrong when deleting Page {0}.'.format(page_title))
                result.append(False)

        return all(result)
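
A minimal usage sketch for delete_page, assuming `bot` is an instance of the helper class that defines it; the endpoint, credentials and titles below are placeholders.

from wikitools.wiki import Wiki

wiki = Wiki('https://example.org/w/api.php')   # hypothetical API endpoint
wiki.login('BotUser', 'secret')                # hypothetical credentials

# Delete two pages and any images they embed; False if anything failed.
ok = bot.delete_page(wiki, ['Sandbox/Old page', 'Sandbox/Stale draft'],
                     delete_image=True)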
Example #2
    def get_page(self, title):
        try:
            target_page = Page(self.site, title)
        except api.APIError as e:
            if e.args[0] == 'missingtitle':
                self.logger.error(
                    u"Article {} does not exist, skipped".format(title))
            else:
                self.logger.error(
                    u"review article for Article {} got exception {}".format(
                        title, e))

            return None
        except NoPage as e:
            self.logger.error(
                u"Article {} page does not exist, skipped".format(title))
            return None

        if not target_page.exists:
            self.logger.error(
                u"Article {} page does not exist, skipped".format(title))
            return None

        categories = target_page.getCategories(True)
        if self.search_disambig and 'Category:Disambiguation pages' in categories:
            pages = []
            for disambig_title in self.search_disambiguation_page(title):
                pages.append(self.get_page(disambig_title))

            return pages[0] if len(pages) > 0 else None

        return target_page
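
A minimal usage sketch, assuming `reviewer` is the object that defines get_page, with its site and search_disambig already configured; the title is a placeholder.

page = reviewer.get_page(u'Alpha (world)')
if page is None:
    print(u'page missing or not resolvable')
else:
    print(page.title)   # wikitools Page objects expose the resolved title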
Example #3
    def get_page(self, title):
        try:
            target_page = Page(self.site, title)
        except api.APIError as e:
            if e.args[0] == 'missingtitle':
                self.logger.error(u"Article {} does not exist, skipped".format(title))
            else:
                self.logger.error(u"review article for Article {} got exception {}".format(title, e))

            return None
        except NoPage as e:
            self.logger.error(u"Article {} page does not exist, skipped".format(title))
            return None

        if not target_page.exists:
            self.logger.error(u"Article {} page does not exist, skipped".format(title))
            return None

        categories = target_page.getCategories(True)
        if self.search_disambig and 'Category:Disambiguation pages' in categories:
            pages = []
            for disambig_title in self.search_disambiguation_page(title):
                pages.append(self.get_page(disambig_title))

            return pages[0] if len(pages) > 0 else None

        return target_page
Example #4
    def _process_page(self, cursor):
        dyk = Page(self._site, title="Template talk:Did you know")
        text = dyk.getWikiText()
        text = text.decode("utf-8")
        parsed = Parser.parse(text)
        for name in parsed.filter_templates():
            name = unicode(name)
            if name.startswith("{{Template:Did you know nominations") \
                or name.startswith("{{Did you know nominations"):
                name = unicode(name).replace("{{", "Template:").replace(
                    "}}", "")
                name = name.replace("Template:Template:", "Template:")
                self.templates.append(name)
            else:
                continue

        cursor.execute(self.create_query)
        cursor.execute("SELECT COUNT(*) FROM did_you_know")
        if cursor.fetchone()["COUNT(*)"] >= 1:
            print "A"
            templates = self.templates
            cursor.execute("SELECT * FROM did_you_know")
            data = cursor.fetchall()
            for item in data:
                if item["name"] in templates:
                    templates.remove(item["name"])
                else:
                    continue
            try:
                self._handle_sql_query(cursor, templates=templates)
            except Exception as e:
                print "self._handle_sql_query() threw exception: " + str(e)
                return False
            return True
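
The Parser above pulls {{Did you know nominations/...}} transclusions out of the nominations page. A standalone sketch of the same normalisation, assuming an mwparserfromhell-style parser; the wikitext is made up.

import mwparserfromhell

text = u"{{Did you know nominations/Example article}}\n{{Unrelated template}}"
for template in mwparserfromhell.parse(text).filter_templates():
    name = unicode(template)
    if name.startswith("{{Did you know nominations"):
        # "{{Did you know nominations/X}}" -> "Template:Did you know nominations/X"
        name = name.replace("{{", "Template:").replace("}}", "")
        print(name)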
Example #5
    def _handle_pages(self, cursor):
        cursor.execute("""SELECT name, creator FROM did_you_know 
                          WHERE to_be_handled = 1;
                       """)
        data = cursor.fetchall()
        for item in data:
            title = item["name"]
            creator = item["creator"]
            user = User(self._site, creator, check=True)
            if not user.exists:
                continue
            if user.isBlocked():
                continue
            if user.isIP:
                continue
            user_talk = user.getTalkPage(check=True)
            text = user_talk.getWikiText()
            message = "\n==Message from Cerabot==\n" \
                    "{0}SUBST:User:Cerabot/Umbox|article={1}|" \
                    "include_links=yes".format("{{", title.split("/")[1])
            newtext = text + message + "}}"
            summary = "Notifying [[User:{0}|{0}]] of [[{1}|Did you " \
                "know nomination]] ([[User:Cerabot/Run/Task 2|" \
                "bot]])".format(user.name, title)
            check_page = Page(self._site, "User:Cerabot/Run/Task 2")
            check_text = check_page.getWikiText()
            if not check_text.strip().lower() == "yes":
                return
            user_talk.edit(text=newtext, summary=summary, bot=True,
                minor=True)
            print "I'm done"; exit() # Only do one page for now
        return
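
A quick sketch of the talk-page message the loop assembles, with an illustrative title: the "{{" is injected through format() and the closing "}}" is appended afterwards, so the braces survive str.format.

title = "Template:Did you know nominations/Example article"
message = "\n==Message from Cerabot==\n" \
          "{0}SUBST:User:Cerabot/Umbox|article={1}|" \
          "include_links=yes".format("{{", title.split("/")[1])
print(message + "}}")
# ==Message from Cerabot==
# {{SUBST:User:Cerabot/Umbox|article=Example article|include_links=yes}}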
Example #6
    def _database_cleanup(self, cursor):

        dyk = Page(self._site, title="Template talk:Did you know")
        text = dyk.getWikiText()
        text = text.decode("utf-8")
        parsed = Parser.parse(text)
        for name in parsed.filter_templates():
            name = unicode(name)
            if name.startswith("{{Template:Did you know nominations") \
                or name.startswith("{{Did you know nominations"):
                name = unicode(name).replace("{{", "Template:").replace(
                    "}}", "")
                name = name.replace("Template:Template:", "Template:")
Example #7
    def _handle_sql_query(self, cursor, templates=None):
        q = []
        for template in templates:
            print "C"
            dyk, article = (Page(self._site, title=template), Page(
                self._site, title=template.split("/")[1]))
            print dyk, article
            categories = dyk.getCategories()
            s = " ".join(categories)
            passed = findall("Category:Passed DYK nominations from", s)
            failed = findall("Category:Failed DYK nominations from", s)
            if passed or failed:
                self.to_be_removed.append(dyk)
                continue
            try:
                a = article.getHistory(direction="newer", content=False,
                    limit=1)[0]
                d = dyk.getHistory(direction="newer", content=False,
                    limit=1)[0]
            except Exception as e:
                print e
                continue
            values = {
                "name": unicode(template),
                "creator": unicode(a["user"]),
                "nominator": unicode(d["user"]),
                "timestamp": unicode(a["timestamp"]),
                "to_be_handled": 0
            }
            if a["user"].lower() != d["user"].lower():
                values["to_be_handled"] = 1
            cursor.execute(self.insert_query, (
                values["name"],
                values["to_be_handled"],
                values["creator"],
                values["nominator"],
                values["timestamp"]
            ))
            # Page objects are not strings, so join their titles for the query.
            delete_string = ", ".join(page.title for page in self.to_be_removed)
            cursor.execute(self.delete_query.format(delete_string))
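
insert_query and delete_query are defined elsewhere in the bot; hypothetical definitions consistent with how they are used above, assuming a MySQLdb-style cursor with %s placeholders.

insert_query = ("INSERT INTO did_you_know "
                "(name, to_be_handled, creator, nominator, timestamp) "
                "VALUES (%s, %s, %s, %s, %s)")
delete_query = "DELETE FROM did_you_know WHERE name IN ({0})"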
Example #8
    def copy_page(self, src_wiki, dest_wiki, page_list):
        """
        @summary:
        @param src_wiki:
        @param dest_wiki:
        @param page_list:
        @return:
        """
        if not self._check_login_status(src_wiki):
            self.__log.critical('copy_page: Source Wiki not logged in.')
            return False

        if not self._check_login_status(dest_wiki):
            self.__log.critical('copy_page: Destination Wiki not logged in.')
            return False

        if not isinstance(page_list, list):
            self.__log.critical('copy_page: {0} is not a list.'.format(page_list))
            return False

        result = []

        for page_title in page_list:
            src_page = Page(src_wiki, title=page_title)

            if not src_page.exists:
                result.append(False)
                self.__log.critical('copy_page: Page {0} does not exist.'.format(page_title))
                continue

            dest_page = Page(dest_wiki, title=page_title)
            wiki_text = src_page.getWikiText()

            try:
                result.append(dest_page.edit(wiki_text))
            except Exception:
                result.append(False)
                self.__log.critical('copy_page: Something went wrong when editing destination page {0}.'.format(page_title))

            image_list = self._parse_image_title(wiki_text)
            result.append(self.copy_image(src_wiki, dest_wiki, image_list))

        return all(result)
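
A minimal usage sketch for copy_page, assuming `bot` is an instance of the class that defines it; both endpoints, the credentials and the title are placeholders.

from wikitools.wiki import Wiki

src_wiki = Wiki('https://source.example.org/w/api.php')
dest_wiki = Wiki('https://dest.example.org/w/api.php')
src_wiki.login('BotUser', 'secret')
dest_wiki.login('BotUser', 'secret')

ok = bot.copy_page(src_wiki, dest_wiki, ['Project:Style guide'])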
Example #9
def process():
    parser = argparse.ArgumentParser(
        description='Traveller Wiki create world articles.',
        fromfile_prefix_chars='@')
    parser.add_argument('--skip-list',
                        help='file of worlds to skip adding/updating')
    parser.add_argument(
        '-c',
        '--category',
        action='append',
        help='File with list of worlds to append different category')
    parser.add_argument('-s',
                        '--source',
                        action='append',
                        help='File with list of worlds to append a source')
    parser.add_argument('--log-level', default='INFO')

    parser.add_argument('--site',
                        dest='site',
                        default='https://wiki.travellerrpg.com/api.php')
    parser.add_argument(
        '--user',
        dest='user',
        default='AB-101',
        help='(Bot) user to connect to the wiki, default [AB-101]')
    parser.add_argument('--search-disambig',
                        help='Search value to refine disambiguation title')

    parser.add_argument('--save-to-wiki',
                        dest='save',
                        default=False,
                        action='store_true',
                        help='Save the generated pages to the wiki')
    parser.add_argument('sector',
                        nargs='*',
                        help='T5SS sector file(s) to process')

    args = parser.parse_args()
    set_logging(args.log_level)

    worlds = WikiCreateWorld()
    skip_list = get_skip_list(args.skip_list) if args.skip_list else []

    site = WikiReview.get_site(args.user, args.site)
    wiki_review = WikiReview(site, None, args.search_disambig, 1000)

    category_list = get_category_list(args.category)
    sources_list = get_sources_list(args.source)

    galaxy = worlds.read_sector(args.sector)
    for star in galaxy.stars:
        if star.name in skip_list:
            continue

        wiki_page = wiki_review.get_page(star.wiki_short_name())
        wiki_page = wiki_page[0] if isinstance(wiki_page,
                                               (list, tuple)) else wiki_page
        if wiki_page is None:

            logger.info("Unable to find: {}, creating new page".format(
                star.name))
            wiki_page = Page(site, star.wiki_short_name())

        # Strip the trailing " (world)" suffix (8 characters), e.g. "Alpha (world)" -> "Alpha"
        title = wiki_page.title[:-8]
        logger.info(u"Processing {} -> {}".format(star.name, wiki_page.title))

        categories = category_list[
            star.name] if star.name in category_list else None
        sources = sources_list[star.name] if star.name in sources_list else []

        new_page = worlds.create_page(star, categories, sources, title)

        # print new_page
        # print "=============================================="
        if args.save:
            logger.info("Saving Page: %s", wiki_page.title)
            result = wiki_review.save_page(wiki_page, new_page, create=True)
            logger.info("Save result: %s", result)
Example #10
def main():
    "The main function."
    wiki = pywikibot.Site("en", "wikipedia")
    wiki.login()
    global wikitools_wiki
    wikitools_login()

    wpgo = pywikibot.Page(wiki, "Wikipedia:Goings-on")
    wpgo_content = wpgo.get()
    new_fc = wpgo_content[wpgo_content.find("==New featured content=="):]

    # Trim it down to just the list of featured content
    new_fc = new_fc[:new_fc.find("|-") - 2]

    # Remove the section heading
    new_fc = new_fc[len("==New featured content=="):]

    # Create fc_cats, which looks like this: {type: [title of content]}
    fc_cats = dict()
    for fc_cat in re.finditer(WP_GO_HEADING, new_fc):
        fc_cat_name = fc_cat.groups()[0]
        fc_cat_raw_list = new_fc[fc_cat.start():]
        fc_cat_raw_list = fc_cat_raw_list[len(fc_cat_name) + 1:]
        next_heading = re.search(WP_GO_HEADING, fc_cat_raw_list)
        if next_heading:
            fc_cat_raw_list = fc_cat_raw_list[:next_heading.start()]
        fc_cat_raw_list = fc_cat_raw_list.strip()

        # Now that we have just the list, parse out the items
        for fc_item in re.finditer(WP_GO_ITEM, fc_cat_raw_list):
            name, _, label, date = fc_item.groups()
            print u"{} (a {}) was promoted on {}".format(label if label else name, fc_cat_name[:-1], date)
            fc_cats[fc_cat_name] = fc_cats.get(fc_cat_name, []) + [(name,
                                                                    label,
                                                                    date)]

    # Get notification metadata
    for fc_cat, fc_items in fc_cats.items():
        def add_metadata(fc_item):
            name, label, date = fc_item
            nom_link = "Wikipedia:Featured " + fc_cat[:-1] + " candidates/"
            if fc_cat == "pictures":
                nom_link += label[2:-2] if "''" in label else label
                #if not WikitoolsPage(wikitools_wiki, title=nom_link).exists:
                if not wiki.page_exists(nom_link):
                    print(nom_link + " DOESN'T EXIST")
            else:
                nom_link += name[2:-2] if "''" in name else name
                nom_link += "/archive1"
            return (name, label, date, nom_link)
        fc_cats[fc_cat] = map(add_metadata, fc_items)

    # Build "report"
    report = ""
    for fc_cat, fc_items in fc_cats.items():
        report += "\n\n===Featured {}===".format(fc_cat)
        report += "\n{} {} were promoted this week.".format(len(fc_items),
                                                            FC_LINKS[fc_cat])
        for fc_item in fc_items:
            name, label, date, nom_link = fc_item
            piped = "|" + label if label else ""
            report += u"\n* '''[[{}{}]]''' <small>([[{}|nominated]] by [[User:Example|Example]])</small> Description.".format(name, piped, nom_link)
    report = report.strip()

    # Write report to Wikipedia
    report_page = WikitoolsPage(wikitools_wiki, title="User:APersonBot/sandbox")
    print("Editing report page...")
    result = report_page.edit(text=report.encode("ascii", "ignore"),
                              bot=True,
                              summary="Test FC report")
    if result[u"edit"][u"result"] == u"Success":
        print "Success!"
    else:
        print "Error! Couldn't write report - result: {}".format(str(result))
Example #11
params = {
  'action': 'query',
  'list': 'allpages',
  'apfilterredir': 'nonredirects',
  'aplimit': '500',
}
titles = set()
req = APIRequest(wiki, params)
for result in req.queryGen():
  for article in result['query']['allpages']:
    titles.add(article['title'])
titles = list(titles)
titles.sort()
print 'Found', len(titles), 'pages'

for title in titles:
  page = Page(wiki, title)
  text = page.getWikiText().lower()
  printed_link = False
  for pair in pairs:
    if text.count(pair[0]) != text.count(pair[1]):
      if not printed_link:
        print '='*80
        print 'https://wiki.teamfortress.com/w/index.php?action=edit&title=%s' % quote(title.encode('utf-8'))
        printed_link = True
      indices = find_mismatch(text, pair)
      print '-'*80
      print pair
      for index in indices:
        print '-'*80
        print text[index-100:index+100]
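
The script relies on `pairs` and find_mismatch() defined elsewhere; a hypothetical version consistent with how they are used here (counting opening and closing markers and locating the unmatched ones).

pairs = [('{{', '}}'), ('[[', ']]'), ('<!--', '-->')]  # assumed marker pairs

def find_mismatch(text, pair):
  # Hypothetical helper: return positions of unmatched openers/closers, which
  # the caller prints with 100 characters of surrounding context.
  opener, closer = pair
  stack = []      # positions of openers still waiting for a closer
  indices = []    # positions of closers with no matching opener
  i = 0
  while i < len(text):
    if text.startswith(opener, i):
      stack.append(i)
      i += len(opener)
    elif text.startswith(closer, i):
      if stack:
        stack.pop()
      else:
        indices.append(i)
      i += len(closer)
    else:
      i += 1
  return indices + stack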
Example #12
def uploadWorlds(site, sectorFile, economicFile, era):
    data_template = u'''
{{{{StellarData
 |world     = {0}
 |sector    = {1}
 |subsector = {2}
 |era       = {31}
 |hex       = {3}
 |name      = {4}
 |UWP       = {5}
 |pcode     = {6}
 |codes     = {7}
 |sophonts  = {8}
 |details   = {9}
 |ix        = {10}
 |ex        = {11}
 |cx        = {12}
 |nobility  = {13}
 |bases     = {14}
 |zone      = {15}
 |popmul    = {16}
 |belts     = {17}
 |giants    = {18}
 |worlds    = {19}
 |aleg      = {20}
 |stars     = {21}
 |wtn       = {22}
 |ru        = {23}
 |gwp       = {24}
 |trade     = {25}
 |pass      = {26}
 |build     = {27}
 |army      = {28}
 |portSize  = {29}
 |spa       = {30}
 |mspr      = {32}
}}}}'''

    page_template = u'''{{{{StellarDataQuery|name={{{{World|{0}|{1}|{2}|{3}}}}} }}}}

== Astrography and planetology ==
No information yet available. 
 
== History and background ==
No information yet available. 

== References and contributors ==
{{{{Incomplete}}}}
{{{{Source}}}}
{{{{LEN}}}}
'''
    try:
        sectorLines = [line for line in codecs.open(sectorFile, 'r', 'utf-8')]
    except (OSError, IOError):
        logger.error(u"Sector file not found: {}".format(sectorFile))
        return

    sectorData = [line.split(u"||") for line in sectorLines[5:]
                  if not (line.startswith(u'|-') or line.startswith(u'<section')
                          or line.startswith(u'|}') or line.startswith(u'[[Category:'))]

    try:
        economicLines = [line for line in codecs.open(economicFile, 'r', 'utf-8')]
    except (OSError, IOError):
        logger.error(u"Economic file not found: {}".format(economicFile))
        return
    economicData = [line.split(u"||") for line in economicLines[5:]
                    if not (line.startswith(u'|-') or line.startswith(u'<section')
                            or line.startswith(u'|}') or line.startswith(u'[[Category:'))]

    sectorName = economicLines[2][3:-15]
    logger.info(u"Uploading {}".format(sectorName))
    for sec, eco in zip(sectorData, economicData):

        if not sec[0] == eco[0]:
            logger.error(u"{} is not equal to {}".format(sec[0], eco[0]))
            break
        subsectorName = eco[14].split(u'|')[1].strip(u'\n').strip(u']')
        pcodes = ['As', 'De', 'Ga', 'Fl', 'He', 'Ic', 'Oc', 'Po', 'Va', 'Wa']
        dcodes = ['Cp', 'Cx', 'Cs', 'Mr', 'Da', 'Di', 'Pz', 'An', 'Ab', 'Fo', 'Px',
                  'Re', 'Rs', 'Sa', 'Tz', 'Lk',
                  'RsA', 'RsB', 'RsG', 'RsD', 'RsE', 'RsZ', 'RsT',
                  'Fr', 'Co', 'Tp', 'Ho', 'Tr', 'Tu',
                  'Cm', 'Tw']
        codes = sec[3].split()
        pcode = set(pcodes) & set(codes)
        dcode = set(dcodes) & set(codes)

        owned = [code for code in codes if code.startswith(u'O:') or code.startswith(u'C:')]
        homeworlds = re.findall(ur"\([^)]+\)\S?", sec[3], re.U)

        codeCheck = set(codes) - dcode - set(owned) - set(homeworlds)
        sophCodes = [code for code in codeCheck if len(code) > 4]

        sophonts = homeworlds + sophCodes

        codeset = set(codes) - dcode - set(owned) - set(sophCodes) - set(homeworlds)

        if len(pcode) > 0:
            pcode = sorted(list(pcode))[0]
        else:
            pcode = ''

        colony = [code if len(code) > 6 else u'O:' + sectorName[0:4] + u'-' + code[2:]
                  for code in owned if code.startswith(u'O:')]
        parent = [code if len(code) > 6 else u'C:' + sectorName[0:4] + u'-' + code[2:]
                  for code in owned if code.startswith(u'C:')]
        dcode = list(dcode) + colony + parent

        starparts = sec[13].split()
        stars = []

        for x, y in pairwise(starparts):
            if y in ['V', 'IV', 'Ia', 'Ib', 'II', 'III']:
                stars.append(' '.join((x, y)))
            else:
                stars.append(x)
                stars.append(y)
        if len(starparts) % 2 == 1:
            stars.append(starparts[-1])

        hexNo = sec[0].strip(u'|').strip()
        worldPage = eco[1].strip() + u" (world)"

        worldName = worldPage.split(u'(')
        shortName = shortNames[sectorName]
        worldPage = worldName[0] + u'(' + shortName + u' ' + hexNo + u') (' + worldName[1]

        site.search_disambig = worldName
        target_page = site.get_page(worldPage)

        pages = [target_page] if not isinstance(target_page, (list, tuple)) else target_page

        for page in pages:
            pass

        try:
            target_page = Page(site, worldPage)
            # First, check if this is a disambiguation page, if so generate
            # the alternate (location) name
            categories = target_page.getCategories(True)
            if 'Category:Disambiguation pages' in categories:
                worldName = worldPage.split(u'(')
                shortName = shortNames[sectorName]
                worldPage = worldName[0] + u'(' + shortName + u' ' + hexNo + u') (' + worldName[1]
                target_page = Page(site, worldPage)

            # Second, check if this page was redirected to another page
            if target_page.title != worldPage:
                logger.info(u"Redirect {} to {}".format(worldPage, target_page.title))
                worldPage = target_page.title

        except NoPage:
            logger.info(u"Missing Page: {}".format(worldPage))
            page_data = page_template.format(eco[1].strip(), sectorName, subsectorName, hexNo)
            target_page = Page(site, worldPage)
            try:
                result = target_page.edit(text=page_data, summary='Trade Map update created world page',
                                          bot=True, skipmd5=True)

                if result['edit']['result'] == 'Success':
                    logger.info(u'Saved: {}'.format(worldPage))
            except api.APIError as e:
                logger.error(u"UploadSummary for page {} got exception {} ".format(worldPage, e))
                continue

        data = data_template.format(eco[1].strip(),  # World
                                    sectorName,  # Sector
                                    subsectorName,  # Subsector
                                    hexNo,  # hex
                                    worldPage,  # Name
                                    sec[2].strip(),  # UWP
                                    pcode,  # pcode
                                    u','.join(sorted(list(codeset))),  # codes
                                    u','.join(sophonts),  # sophonts
                                    u','.join(sorted(list(dcode))),  # details
                                    sec[4].strip(u'{}'),  # ix (important)
                                    sec[5].strip(u'()'),  # ex (economic)
                                    sec[6].strip(u'[]'),  # cx (cultural)
                                    sec[7].strip(),  # nobility
                                    sec[8].strip(),  # bases
                                    sec[9].strip(),  # Zone
                                    sec[10][0],  # pop mult
                                    sec[10][1],  # Belts
                                    sec[10][2],  # GG Count
                                    sec[11],  # Worlds
                                    sec[12],  # Allegiance
                                    u','.join(stars),  # stars
                                    int(eco[5].strip()),  # wtn
                                    eco[6].strip(),  # RU
                                    eco[7].strip(),  # GWP
                                    eco[8].strip(),  # Trade
                                    eco[9].strip(),  # Passengers
                                    eco[10].strip(),  # build capacity
                                    eco[11].strip(),  # Army
                                    eco[12].strip(),  # port size
                                    eco[13].strip(),  # spa population
                                    era,  # era
                                    eco[14].strip()  # MSPR
                                    )
        try:
            target_page = Page(site, worldPage + u'/data')
            if target_page.exists:
                page_text = unicode(target_page.getWikiText(), 'utf-8')
                templates = re.findall(ur"{{[^}]*}}", page_text, re.U)
                era_name = u"|era       = {}".format(era)
                newtemplates = [template if not era_name in template else data
                                for template in templates]
                newdata = u'\n'.join(newtemplates)
                if era_name not in newdata:
                    newtemplates.insert(0, data)
                    newdata = u'\n'.join(newtemplates)

                if newdata == page_text:
                    logger.info(u'No changes to template: skipping {}'.format(worldPage))
                    continue
                data = newdata
            result = target_page.edit(text=data, summary='Trade Map update world data',
                                      bot=True, skipmd5=True)

            if result['edit']['result'] == 'Success':
                logger.info(u'Saved: {}/data'.format(worldPage))
            else:
                logger.error(u'Save failed {}/data'.format(worldPage))
        except api.APIError as e:
            if e.args[0] == 'missingtitle':
                logger.error(u"UploadSummary for page {}, page does not exist".format(worldPage))
            else:
                logger.error(u"UploadSummary for page {} got exception {} ".format(worldPage, e))
Example #13
params = {
    'action': 'query',
    'list': 'allpages',
    'apfilterredir': 'nonredirects',
    'aplimit': '500',
}
titles = set()
req = APIRequest(wiki, params)
for result in req.queryGen():
    for article in result['query']['allpages']:
        titles.add(article['title'])
titles = list(titles)
titles.sort()
print 'Found', len(titles), 'pages'

for title in titles:
    page = Page(wiki, title)
    text = page.getWikiText().lower()
    printed_link = False
    for pair in pairs:
        if text.count(pair[0]) != text.count(pair[1]):
            if not printed_link:
                print '=' * 80
                print 'https://wiki.teamfortress.com/w/index.php?action=edit&title=%s' % quote(
                    title.encode('utf-8'))
                printed_link = True
            indices = find_mismatch(text, pair)
            print '-' * 80
            print pair
            for index in indices:
                print '-' * 80
                print text[index - 100:index + 100]
Example #14
def main():
    "The main function."
    wiki = pywikibot.Site("en", "wikipedia")
    wiki.login()
    global wikitools_wiki
    wikitools_login()

    wpgo = pywikibot.Page(wiki, "Wikipedia:Goings-on")
    wpgo_content = wpgo.get()
    new_fc = wpgo_content[wpgo_content.find("==New featured content=="):]

    # Trim it down to just the list of featured content
    new_fc = new_fc[:new_fc.find("|-") - 2]

    # Remove the section heading
    new_fc = new_fc[len("==New featured content=="):]

    # Create fc_cats, which looks like this: {type: [title of content]}
    fc_cats = dict()
    for fc_cat in re.finditer(WP_GO_HEADING, new_fc):
        fc_cat_name = fc_cat.groups()[0]
        fc_cat_raw_list = new_fc[fc_cat.start():]
        fc_cat_raw_list = fc_cat_raw_list[len(fc_cat_name) + 1:]
        next_heading = re.search(WP_GO_HEADING, fc_cat_raw_list)
        if next_heading:
            fc_cat_raw_list = fc_cat_raw_list[:next_heading.start()]
        fc_cat_raw_list = fc_cat_raw_list.strip()

        # Now that we have just the list, parse out the items
        for fc_item in re.finditer(WP_GO_ITEM, fc_cat_raw_list):
            name, _, label, date = fc_item.groups()
            print u"{} (a {}) was promoted on {}".format(
                label if label else name, fc_cat_name[:-1], date)
            fc_cats[fc_cat_name] = fc_cats.get(fc_cat_name,
                                               []) + [(name, label, date)]

    # Get notification metadata
    for fc_cat, fc_items in fc_cats.items():

        def add_metadata(fc_item):
            name, label, date = fc_item
            nom_link = "Wikipedia:Featured " + fc_cat[:-1] + " candidates/"
            if fc_cat == "pictures":
                nom_link += label[2:-2] if "''" in label else label
                #if not WikitoolsPage(wikitools_wiki, title=nom_link).exists:
                if not wiki.page_exists(nom_link):
                    print(nom_link + " DOESN'T EXIST")
            else:
                nom_link += name[2:-2] if "''" in name else name
                nom_link += "/archive1"
            return (name, label, date, nom_link)

        fc_cats[fc_cat] = map(add_metadata, fc_items)

    # Build "report"
    report = ""
    for fc_cat, fc_items in fc_cats.items():
        report += "\n\n===Featured {}===".format(fc_cat)
        report += "\n{} {} were promoted this week.".format(
            len(fc_items), FC_LINKS[fc_cat])
        for fc_item in fc_items:
            name, label, date, nom_link = fc_item
            piped = "|" + label if label else ""
            report += u"\n* '''[[{}{}]]''' <small>([[{}|nominated]] by [[User:Example|Example]])</small> Description.".format(
                name, piped, nom_link)
    report = report.strip()

    # Write report to Wikipedia
    report_page = WikitoolsPage(wikitools_wiki,
                                title="User:APersonBot/sandbox")
    print("Editing report page...")
    result = report_page.edit(text=report.encode("ascii", "ignore"),
                              bot=True,
                              summary="Test FC report")
    if result[u"edit"][u"result"] == u"Success":
        print "Success!"
    else:
        print "Error! Couldn't write report - result: {}".format(str(result))
Example #15
from wikitools.page import Page
from wikitools import wiki
# `config` is used as a mapping below, so it is assumed to be a dict exposed
# by a local config module (e.g. defined in config.py as `config = {...}`).
from config import config

w = wiki.Wiki(config["wikiApi"])
w.login(config["wikiUsername"], config["wikiPassword"])

summary = (
    "Automatic Update by %s using [https://github.com/jbzdarkid/TFWiki-scripts Wikitools]" % config["wikiUsername"]
)

import wiki_edit_stats, wiki_undocumented_templates, wiki_unused_files, equipregions, external_links_analyse2, LODTables

Page(w, "Team Fortress Wiki:Reports/Users by edit count").edit(text=wiki_edit_stats.main(), summary=summary)

Page(w, "Team Fortress Wiki:Reports/Undocumented templates").edit(
    text=wiki_undocumented_templates.main(), summary=summary
)

Page(w, "Team Fortress Wiki:Reports/Unused files").edit(text=wiki_unused_files.main(), summary=summary)

text = Page(w, "Template:Equip region table").getWikiText()
start = text.index("! {{item name|")  # Start of equip regions
end = text.index("<noinclude>")  # End of table
Page(w, "Template:Equip region table").edit(text=text[:start] + equipregions.main() + text[end:], summary=summary)

Page(w, "Template:LODTable").edit(text=LODTables.main(), summary=summary)

Page(w, "Team Fortress Wiki:Reports/External links").edit(text=external_links_analyse2.main(), summary=summary)