def _api_get_new_category_pages(
    category: pywikibot.Category,
    start_time: pywikibot.Timestamp,
    end_time: pywikibot.Timestamp,
    namespaces: List[int],
) -> Iterator[Tuple[pywikibot.page.BasePage, pywikibot.Timestamp]]:
    """Use API to list category pages. Called by get_new_categoryPages()

    Yields (page, timestamp-added-to-category) pairs for members of
    *category* whose cl_timestamp falls in [start_time, end_time],
    restricted to *namespaces*, in timestamp order.
    """
    members = pywikibot.data.api.ListGenerator(
        "categorymembers",
        site=site,
        cmtitle=category.title(underscore=True, with_ns=True),
        cmprop="title|type|timestamp",
        cmnamespace="|".join(str(n) for n in namespaces),
        cmtype="page",
        cmsort="timestamp",
        cmstart=start_time.isoformat(),
        cmend=end_time.isoformat(),
    )
    for member in members:
        # Despite cmtype=page, be defensive about the reported member type.
        if member.get("type", "page") != "page":
            continue
        page = pywikibot.Page(
            site, title=member.get("title", ""), ns=member.get("ns", "")
        )
        added = pywikibot.Timestamp.fromISOformat(member.get("timestamp"))
        yield page, added
def _db_get_new_category_pages(
    category: pywikibot.Category,
    start_time: pywikibot.Timestamp,
    end_time: pywikibot.Timestamp,
    namespaces: List[int],
) -> Iterator[Tuple[pywikibot.page.BasePage, datetime]]:
    """Use DB to list category pages. Called by get_new_categoryPages()

    Yields (page, cl_timestamp) pairs for members of *category* whose
    cl_timestamp falls in [start_time, end_time), restricted to
    *namespaces*, in timestamp order.

    @raises ConnectionError: when not running on WMCS (no replica access)
    """
    if not wmcs:
        raise ConnectionError
    # The category name and timestamps are passed as bound parameters so a
    # title containing quotes/backslashes cannot break (or inject into) the
    # statement.  The namespace list is caller-controlled ints, so
    # interpolating it is safe (placeholders cannot expand an IN-list here).
    query = ("SELECT page_namespace, page_title, cl_timestamp "
             "FROM "
             "  categorylinks "
             "  JOIN page ON page_id = cl_from "
             "WHERE "
             "  cl_to = %(catname)s AND "
             '  cl_type = "page" AND '
             "  cl_timestamp >= %(start_timestamp)s AND "
             "  cl_timestamp < %(end_timestamp)s AND "
             "  page_namespace in ({nslist}) "
             "ORDER BY cl_timestamp ").format(
                 nslist=", ".join(str(n) for n in namespaces))
    params = {
        "catname": category.title(underscore=True, with_ns=False),
        "start_timestamp": start_time.totimestampformat(),
        "end_timestamp": end_time.totimestampformat(),
    }
    for ns, title, ts in pywikibot.data.mysql.mysql_query(
            query, params=params, dbname=site.dbName()):
        yield (
            # page_title comes back as bytes from the replica.
            pywikibot.Page(site, title=title.decode(encoding="utf-8"), ns=ns),
            ts,
        )
def find_discussion(self, category: pywikibot.Category) -> 'CfdPage':
    """
    Return the relevant discussion.

    @param category: The category being discussed
    """
    if self.section():
        # This page already points at one specific discussion section.
        return self
    cleaned = removeDisabledParts(self.text, tags=EXCEPTIONS, site=self.site)
    parsed = mwparserfromhell.parse(cleaned, skip_style_tags=True)
    for level4 in parsed.get_sections(levels=[4]):
        first_heading = level4.filter_headings()[0]
        heading_title = str(first_heading.title).strip()
        candidate = self.__class__(
            self.site, '{}#{}'.format(self.title(), heading_title))
        # Direct match: the section heading names the category itself.
        if category.title() == heading_title:
            return candidate
        # Split approximately into close, nom, and others.
        chunks = str(level4).split('(UTC)')
        if len(chunks) < 3:
            continue
        # Parse the nom for category links.
        nom_code = mwparserfromhell.parse(chunks[1], skip_style_tags=True)
        for node in nom_code.ifilter():
            found = self._cat_from_node(node)
            if found and category == found:
                return candidate
    return self
def db_get_usage(cat: pywikibot.Category, depth: int) -> UsageResult:
    """Query the Commons replica for global usage counts of category files.

    Counts globalimagelinks rows per raster image (non-SVG) in *cat* and its
    subcategories down to *depth* levels, and returns the top 200 files by
    that count.

    @param cat: Root category on Commons
    @param depth: Subcategory recursion depth passed to list_cats()
    @return: UsageResult of up to 200 FileUsage entries plus the row total
    """
    query = """
    SELECT page_title, count(*)
    FROM categorylinks
    JOIN page ON cl_from = page_id
    LEFT JOIN globalimagelinks ON page_title = gil_to
    JOIN image ON img_name = page_title
    WHERE
        cl_to IN %(cats)s
        AND img_major_mime = "image"
        AND img_minor_mime != "svg+xml"
    GROUP BY page_title
    ORDER BY count(*) DESC
    """
    # Build the name list first; the original comprehension shadowed the
    # `cat` parameter with its loop variable.
    cat_names = [
        subcat.title(with_ns=False, underscore=True)
        for subcat in list_cats(cat, depth)
    ]
    conn = toolforge.connect("commonswiki")
    try:
        with conn.cursor() as cur:
            total = cur.execute(query, args={"cats": cat_names})
            data = cast(List[Tuple[bytes, int]], cur.fetchall())
    finally:
        # toolforge.connect() hands back a live DB connection; close it so
        # we do not leak replica connections on every call.
        conn.close()
    files = [
        FileUsage(f"File:{str(page, encoding='utf-8')}", count)
        for page, count in data
    ]
    return UsageResult(files[:200], total, [])
def redirect_cat(cat: pywikibot.Category, target: pywikibot.Category,
                 summary: str) -> None:
    """
    Redirect a category to another category.

    @param cat: Category to redirect
    @param target: Category redirect target
    @param summary: Edit summary
    """
    target_title = target.title(with_ns=False)
    redirect_template = Template('Category redirect')
    redirect_template.add('1', target_title)
    # Replace the whole page text with just the redirect template.
    cat.text = str(redirect_template)
    cat.save(summary=summary)