Code Example #1
File: cfdw.py  Project: Rachelmorrell/JJMC89_bot
import pywikibot

# SUMMARIES is a dict of edit summaries defined at module level in cfdw.py.
def delete_page(page: pywikibot.Page, summary: str) -> None:
    """Delete the page and dependent pages."""
    page.delete(reason=summary, prompt=False)
    if page.exists():
        # Deletion did not go through; leave dependent pages alone.
        return
    page_link = page.title(as_link=True)
    for redirect in page.backlinks(filter_redirects=True):
        redirect.delete(reason=SUMMARIES['redirect'].format(page_link),
                        prompt=False)
    talk_page = page.toggleTalkPage()
    if talk_page.exists():
        talk_page.delete(reason=SUMMARIES['talk'].format(page_link),
                         prompt=False)
        talk_link = talk_page.title(as_link=True)
        for redirect in talk_page.backlinks(filter_redirects=True):
            redirect.delete(reason=SUMMARIES['redirect'].format(talk_link),
                            prompt=False)
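
A minimal invocation sketch, assuming a SUMMARIES dict like the one below; the real summaries live in cfdw.py and these values are illustrative only:

import pywikibot

# Illustrative stand-ins for the project's real edit summaries.
SUMMARIES = {
    'redirect': 'Deleting redirect to deleted page {}',
    'talk': 'Deleting talk page of deleted page {}',
}

site = pywikibot.Site('en', 'wikipedia')
page = pywikibot.Page(site, 'Example title')
delete_page(page, 'Deleted per discussion')  # the account needs delete rights
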
Code Example #2
    def get_entry_for_page(self, year, month, day, page: Page):
        # Strip trailing commas left over from loosely formatted dates.
        if month.endswith(','):
            month = month[:-1]
        if day.endswith(','):
            day = day[:-1]
        if str(month).isdecimal() and not str(day).isdecimal():
            # swap out month and day if necessary
            month, day = day, month
        if len(day) == 4:
            # A four-digit "day" is actually the year; swap them back.
            day, year = year, day
        if month in MONTH_REPLACEMENTS:
            month = MONTH_REPLACEMENTS[month]

        main_page = page.toggleTalkPage()
        # Lowercased wikilink prefixes to look for in the archive rows.
        search_entries = ["'''[[" + main_page.title().lower()]

        # Also search under any former titles recorded in page-move summaries.
        for revision in main_page.revisions():
            result = MOVED_REGEX.match(revision.comment)
            if result is not None:
                old_name = result.group(1)
                old_page = self.get_mediawiki_api().get_page(old_name)
                search_entries.append("'''[[" + old_page.title().lower())
        for incoming_redirect in main_page.backlinks(filter_redirects=True,
                                                     follow_redirects=False,
                                                     namespaces=[0]):
            search_entries.append("'''[[" + incoming_redirect.title().lower())

        print(search_entries)

        archive_text = self.get_archive_page(year, month)

        for row in str(archive_text).split('\n'):
            row_to_search = row.lower()
            for regex in NAME_REPLACEMENTS:
                row_to_search = regex.sub(NAME_REPLACEMENTS[regex],
                                          row_to_search)
            for search_entry in search_entries:
                if search_entry in row_to_search:
                    text = row[1:]  # remove * from beginning
                    # you could check dates here, if wanted - please don't
                    # for now, see BRFA for more details
                    return text
        return False
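
The method leans on several module-level constants from its source project. A hedged sketch of their likely shape; the names are real but every value below is an assumption, not the project's actual data:

import re

# Assumed shapes only; the real tables live in the source project.
MONTH_REPLACEMENTS = {'Jan': 'January', 'Feb': 'February', 'Sept': 'September'}
MOVED_REGEX = re.compile(r'.*moved page \[\[(.+?)\]\] to \[\[.+?\]\]')
NAME_REPLACEMENTS = {re.compile(r'&amp;'): '&'}
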
Code Example #3
def process_list(type, formatter_class):
    """
    Post notifications about a certain type of deletion.

    @param type: Deletion type
    @type type: str
    @param formatter_class: Class to be used for formatting messages
    @type formatter_class: commonsbot.formatters.Formatter
    """
    # commons, config, with_store, DeletionState, PerWikiMapper and the
    # remaining bare names are defined elsewhere in the project.
    filename = 'lists/%s.txt' % type
    if not os.path.isfile(filename):
        return
    with open(filename, 'r', encoding='utf8') as file:
        lines = [s.strip() for s in file.readlines()]

    file_states = {}

    def load(store):
        nonlocal file_states
        (file_states, _) = store.load_state(lines, type)

    with_store(load)
    mapper = PerWikiMapper(NOTIFS_PER_WIKI)
    notified_files = set()

    # Resolve each listed file to a deletion state, then collect the pages
    # that will need a talk-page notification.
    for filename in lines:
        file = FilePage(commons, filename)
        if filename in file_states:
            state = file_states[filename]
        else:
            print('No deletion state found for %s, stubbing' % filename,
                  file=sys.stderr)
            state = DeletionState(filename, type, 'new')
            file_states[filename] = state
        state.file_page = file
        if type == 'discussion':
            state.load_discussion_info(commons)
            page = Page(commons, state.discussion_page)
            if not page.exists():
                print(
                    "Discussion page %s doesn't exist, not notifying about this file"
                    % page,
                    file=sys.stderr)
                continue

        pageset = file.globalusage(MAX_GLOBALUSAGE)
        for page in pageset:
            wiki = page.site.dbName()
            if wiki not in config.wikis:
                continue
            if page.namespace() != Namespace.MAIN:
                continue

            # Skip talk pages the bot cannot or should not edit.
            talk_page = page.toggleTalkPage()
            if talk_page.isRedirectPage():
                continue
            if not talk_page.botMayEdit():
                continue
            if talk_page.exists() and not talk_page.canBeEdited():
                continue
            mapper.add(filename, page)

    # Post one batched notification per talk page.
    for page, files in mapper.files_per_page():
        states = [file_states[filename] for filename in files]

        try:
            spam_notifications(type, formatter_class, page.toggleTalkPage(),
                               states)
        except BaseException:
            # Error - save state to avoid reposting and then rethrow
            failed = set(states)
            failed_only = failed - notified_files

            def save(store):
                store.set_failure(type, list(failed_only))
                store.set_state(type, list(notified_files), 'notified')

            with_store(save)
            raise

        notified_files.update(states)

    with_store(lambda store: store.set_state(type, list(file_states.values()),
                                             'notified'))
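
A hedged driver sketch: 'discussion' is the one deletion type the function itself branches on; the second type and both formatter class names are hypothetical stand-ins for whatever commonsbot.formatters actually exports:

from commonsbot import formatters  # module named in the docstring above

# Hypothetical class names; substitute the real Formatter subclasses.
process_list('discussion', formatters.DiscussionFormatter)
process_list('speedy', formatters.SpeedyFormatter)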