def RecentChanges(limit):
    """Return the set of page titles changed since the given timestamp."""
    limitString = limit
    time_format = "%Y-%m-%dT%H:%M:%SZ"
    limit = datetime.datetime.fromtimestamp(
        time.mktime(time.strptime(limitString, time_format)))
    current = datetime.datetime.now()
    titles = set()  # renamed from `list`, which shadowed the builtin
    params = {
        'action': 'query',
        'list': 'recentchanges',
        'rcprop': 'timestamp|title',
        'rclimit': '100',
        'rcnamespace': 0,
    }
    while current > limit:
        textDate = current.strftime(time_format)
        params['rcstart'] = textDate
        req = Request(**params)
        qr = req.submit()
        try:
            current = datetime.datetime.fromtimestamp(time.mktime(time.strptime(
                qr['query-continue']['recentchanges']['rcstart'], time_format)))
        except KeyError:
            # No continuation data left: collect the final batch and stop.
            for elem in qr['query']['recentchanges']:
                titles.add(elem['title'])
            break
        else:
            for elem in qr['query']['recentchanges']:
                titles.add(elem['title'])
    return titles

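# A minimal usage sketch for RecentChanges (an assumption, not part of the
# source): the cutoff is a MediaWiki-style UTC timestamp string, and
# `datetime`, `time`, and pywikibot's `Request` must be importable here.
if __name__ == '__main__':
    for title in sorted(RecentChanges('2020-01-01T00:00:00Z')):
        print(title)
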
def review(site, token, page):
    """Review the latest revision of the given page."""
    revid = page.latest_revision_id
    request = Request(site=site, action="review", token=token, revid=revid)
    request.submit()

def pageCounter(language):
    """Print the number of entries for a language (via PAGESINCAT)."""
    params = {
        'action': 'expandtemplates',
        'text': '{{PAGESINCAT:%s (indeks)|R}}' % language,
    }
    req = Request(**params)
    qr = req.submit()
    print(qr['expandtemplates']['*'])

def flagLastRev(site, revid, comment=''):
    """Mark the given revision as reviewed (FlaggedRevs accuracy flag 1)."""
    token = site.getToken(sysop=False)
    params = {
        'site': site,
        'action': 'review',
        'revid': revid,
        'token': token,
        'flag_accuracy': 1,
        'comment': comment,
    }
    req = Request(**params)
    query = req.submit()

def _load_pages(self, titles):
    """Load a chunk of pages from the API."""
    def _get_rev(page):
        try:
            return page["revisions"][0]["slots"]["main"]["content"]
        except (KeyError, IndexError):
            return ""

    req = Request(self._bot.site, parameters={
        "action": "query",
        "prop": "revisions",
        "rvprop": "content",
        "rvslots": "main",
        "formatversion": "2",
        "titles": "|".join(titles)
    })
    data = req.submit()
    return [(page["title"], _get_rev(page)) for page in data["query"]["pages"]]

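# Hypothetical caller sketch for _load_pages (an assumption, not from the
# source): the MediaWiki API caps `titles` at 50 per query for normal users,
# so a wrapper would feed titles through in chunks.
def _load_all_pages(self, titles, chunk_size=50):
    pages = []
    for i in range(0, len(titles), chunk_size):
        pages.extend(self._load_pages(titles[i:i + chunk_size]))
    return pages
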
def test_mime_file_payload(self):
    """Test Request._generate_MIME_part loads binary as binary."""
    local_filename = os.path.join(_images_dir, "MP_sounds.png")
    with open(local_filename, "rb") as f:
        file_content = f.read()
    submsg = Request._generate_MIME_part("file", file_content,
                                         ("image", "png"),
                                         {"filename": local_filename})
    self.assertEqual(file_content, submsg.get_payload(decode=True))

def get_orlist(site=DEFAULT_SITE,
               namespace="0|6|10|14|100|828",
               redirects="nonredirects"):
    """Get list of oldreviewed pages."""
    request = Request(site=site, action="query", list="oldreviewedpages",
                      ornamespace=namespace, orfilterredir=redirects,
                      orlimit="5000")
    result = []
    while True:
        answer = request.submit()
        result += [page["title"]
                   for page in answer["query"]["oldreviewedpages"]]
        if "query-continue" in answer:
            request["orstart"] = \
                answer["query-continue"]["oldreviewedpages"]["orstart"]
        else:
            break
    return result

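# Usage sketch for get_orlist (an assumption, not from the source): collect
# every old-reviewed article title and report the backlog size.
if __name__ == '__main__':
    backlog = get_orlist(namespace="0")
    print(len(backlog), "pages are waiting for re-review")
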
def test_mime_file_container(self):
    local_filename = os.path.join(_data_dir, 'MP_sounds.png')
    with open(local_filename, 'rb') as f:
        file_content = f.read()
    body = Request._build_mime_request({}, {
        'file': (file_content, ('image', 'png'), {'filename': local_filename})
    })[1]
    self.assertNotEqual(body.find(file_content), -1)

def test_mime_file_container(self):
    """Test Request._build_mime_request encodes binary."""
    local_filename = os.path.join(_images_dir, "MP_sounds.png")
    with open(local_filename, "rb") as f:
        file_content = f.read()
    body = Request._build_mime_request(
        {}, {"file": (file_content, ("image", "png"),
                      {"filename": local_filename})}
    )[1]
    self.assertNotEqual(body.find(file_content), -1)

def test_mime_file_payload(self):
    """Test Request._generate_MIME_part loads binary as binary."""
    local_filename = join_images_path('MP_sounds.png')
    with open(local_filename, 'rb') as f:
        file_content = f.read()
    submsg = Request._generate_MIME_part(
        'file', file_content, ('image', 'png'), {'filename': local_filename})
    self.assertEqual(file_content, submsg.get_payload(decode=True))

def test_mime_file_payload(self):
    """Test Request._generate_MIME_part loads binary as binary."""
    local_filename = os.path.join(_images_dir, 'MP_sounds.png')
    with open(local_filename, 'rb') as f:
        file_content = f.read()
    submsg = Request._generate_MIME_part(
        'file', file_content, ('image', 'png'), {'filename': local_filename})
    self.assertEqual(file_content, submsg.get_payload(decode=True))

def test_mime_file_container(self):
    """Test Request._build_mime_request encodes binary."""
    local_filename = join_images_path('MP_sounds.png')
    with open(local_filename, 'rb') as f:
        file_content = f.read()
    body = Request._build_mime_request({}, {
        'file': (file_content, ('image', 'png'), {'filename': local_filename})
    })[1]
    self.assertNotEqual(body.find(file_content), -1)

def test_upload_object(self):
    """Test Request object prepared to upload."""
    # fake write test needs the config username
    site = self.get_site()
    site._username[0] = 'myusername'
    site._userinfo = {'name': 'myusername', 'groups': []}
    parameters = {'action': 'upload',
                  'file': 'MP_sounds.png',
                  'filename': join_images_path('MP_sounds.png')}
    req = Request(site=site, mime=True, parameters=parameters)
    self.assertEqual(req.mime, True)

def converttitle(title):
    oldtitle = title
    r = Request(site=site, parameters={
        'action': 'query',
        'titles': title,
        'redirects': 1,
        'converttitles': 1
    })
    data = r.submit()
    title = list(data['query']['pages'].values())[0]['title']
    mode = []
    if 'redirects' in data['query']:  # redirect
        mode.append('redirects')
    if 'converted' in data['query']:  # traditional/simplified conversion
        mode.append('converted')
    if 'normalized' in data['query']:  # namespace normalization
        mode.append('normalized')
    if 'redirects' not in mode:
        page = pywikibot.Page(site, title)
        if not page.exists():
            mode.append('vfd_on_source')
        if page.exists() and (
                page.content_model != 'wikitext'
                or page.namespace().id == 8
                or re.search(r'{{\s*([vaictumr]fd|Copyvio)',
                             page.text, flags=re.I)):
            mode.append('vfd_on_source')
    else:
        page = pywikibot.Page(site, oldtitle)
        if page.exists() and (
                page.content_model != 'wikitext'
                or page.namespace().id == 8
                or re.search(r'{{\s*([vaictumr]fd|Copyvio)',
                             page.text, flags=re.I)):
            mode.append('vfd_on_source')
        page = pywikibot.Page(site, title)
        if page.exists() and (
                page.content_model != 'wikitext'
                or page.namespace().id == 8
                or re.search(r'{{\s*([vaictumr]fd|Copyvio)',
                             page.text, flags=re.I)):
            mode.append('vfd_on_target')
    if 'vfd_on_source' not in mode and 'vfd_on_target' not in mode:
        mode.append('no_vfd')
    return {'title': title, 'mode': mode}

def change_message_translation(msg_title: str, content: str) -> dict:
    """
    @param msg_title (str): Title of the message we want to change
    @param content (str): New text of the translation unit

    TODO: should we do it this way, or could we just use the
    pywikibot.Page class and set the text?
    """
    global global_site
    logger.info(f"Change_message_translation: {msg_title}: {content}")
    request_for_token: dict = Request(site=global_site, action="query",
                                      meta="tokens").submit()
    my_token: str = request_for_token['query']['tokens']['csrftoken']
    result: dict = Request(site=global_site, action="edit", title=msg_title,
                           token=my_token, text=content).submit()
    return result

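# Sketch of the pywikibot.Page alternative raised in the TODO above:
# Page.save() obtains the CSRF token itself, so both raw Requests collapse
# into three lines. The function name and edit summary are hypothetical.
def change_message_translation_via_page(msg_title: str, content: str) -> None:
    page = pywikibot.Page(global_site, msg_title)
    page.text = content
    page.save(summary="Update translation unit")
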
def test_upload_object(self):
    """Test Request object prepared to upload."""
    # fake write test needs the config username
    site = self.get_site()
    site._username[0] = 'myusername'
    site._userinfo = {'name': 'myusername', 'groups': []}
    req = Request(site=site, action="upload", file='MP_sounds.png', mime=True,
                  filename=os.path.join(_images_dir, 'MP_sounds.png'))
    self.assertEqual(req.mime, True)

def test_mime_file_container(self):
    local_filename = os.path.join(_images_dir, 'MP_sounds.png')
    with open(local_filename, 'rb') as f:
        file_content = f.read()
    body = Request._build_mime_request({}, {
        'file': (file_content, ('image', 'png'), {
            'filename': local_filename
        })
    })[1]
    self.assertNotEqual(body.find(file_content), -1)

def is_user_active(site, username):
    """Test whether the given user has contributed since THREE_MONTHS_AGO."""
    contributions = Request(
        site=site, action="query", list="usercontribs", ucuser=username,
        uclimit=1, ucprop="timestamp").submit()["query"]["usercontribs"]
    # ISO dates compare correctly as strings.
    return bool(contributions) and contributions[0]["timestamp"].partition(
        "T")[0] > THREE_MONTHS_AGO

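# Usage sketch for is_user_active (assumptions: THREE_MONTHS_AGO is a
# "YYYY-MM-DD" string defined elsewhere in this module, and the username
# is only illustrative).
if __name__ == '__main__':
    site = pywikibot.Site('en', 'wikipedia')
    print(is_user_active(site, 'Example'))
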
def main(self):
    badPage = pywikibot.Page(self.site, 'MediaWiki:Bad image list')
    text = badPage.text

    logger.info('cache pages')
    for page in badPage.linkedPages():
        self.cachedPages[page.title()] = page

    logger.info('cache files')
    data = Request(site=self.site, parameters={
        'action': 'query',
        'format': 'json',
        'formatversion': '2',
        'prop': 'imageinfo',
        'titles': 'MediaWiki:Bad image list',
        'generator': 'links',
        'gplnamespace': '6',
        'gpllimit': 'max'
    }).submit()
    for page in data['query']['pages']:
        # An empty imagerepository means the file does not exist.
        self.cachedFiles[page['title']] = page['imagerepository'] != ''

    logger.info('get_en_list')
    en_list = self.get_en_list()
    logger.info('process_text')
    new_text = self.process_text(text, en_list)
    logger.info('done')

    if text == new_text:
        logger.info('nothing changed')
        return

    if self.CONFIRM:
        pywikibot.showDiff(text, new_text)
        save = input('Save?')
    elif self.DRY_RUN:
        save = 'no'
    else:
        save = 'yes'
    if save.lower() in ['y', 'yes']:
        badPage.text = new_text
        badPage.save(summary=self.cfg['summary'], minor=False, botflag=False)
    else:
        with open('temp.txt', 'w', encoding='utf8') as f:
            f.write(new_text)

def get_pages_categories(pagelist, site=DEFAULT_SITE, limit=500):
    """
    For every page from the list get its categories
    and return a {page: [categories]} dictionary.
    """
    result = dict.fromkeys(pagelist, [])
    kwargs = {
        "action": "query",
        "prop": "categories",
        "cllimit": "5000"
    }
    for idx in range(0, len(pagelist), limit):
        kwargs["titles"] = "|".join(pagelist[idx:idx + limit])
        request = Request(site=site, **kwargs)
        while True:
            answer = request.submit()
            # Wikipedia API can return the page list in non-canonical form!
            # At least when there are two possible canonical forms for one
            # namespace (for instance, "Участник" - "Участница" in Russian
            # Wikipedia). The query will normalize them and we need to map
            # the titles back.
            denormalize = {}
            if "normalized" in answer["query"]:
                for fix in answer["query"]["normalized"]:
                    denormalize[fix["to"]] = fix["from"]
            for value in answer["query"]["pages"].values():
                title = value["title"]
                if title in denormalize:
                    title = denormalize[title]
                if "categories" in value:
                    cats = [cat["title"] for cat in value["categories"]]
                    result[title] = result[title] + cats
            if "query-continue" in answer:
                request["clcontinue"] = \
                    answer["query-continue"]["categories"]["clcontinue"]
                continue
            break
    return result

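# Usage sketch for get_pages_categories (an assumption, not from the source):
# titles are batched 500 per request, and categories accumulate across
# query-continue rounds.
if __name__ == '__main__':
    cats = get_pages_categories(["Example", "Sandbox"])
    for page_title, categories in cats.items():
        print(page_title, "->", categories)
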
def test_unexpected_user(self):
    """Test Request object when username is not correct."""
    self.site._userinfo = {
        'name': 'other_username',
        'groups': [],
        'id': '1'
    }
    self.site._username = '******'
    # Ignore warning: API write action by unexpected username commenced.
    with patch('pywikibot.warning'):
        Request(site=self.site, parameters={'action': 'edit'})
    self.assertNotEqual(self.site.user(), self.site.username())
    self.assertNotEqual(self.site.userinfo['name'], self.site.username())
    self.assertFalse(self.site.logged_in())

def active_and_future_campaigns():
    from pywikibot.data.api import Request
    from pywikibot import Timestamp

    parameters = {
        'action': 'query',
        'list': 'centralnoticeactivecampaigns',
        'cnacincludefuture': ''
    }
    request = Request(_site, parameters=parameters)
    # TODO Error handling
    raw_query_data = request.submit()
    raw_campaigns = (
        raw_query_data['query']['centralnoticeactivecampaigns']['campaigns'])
    # Convert start and end to datetime objects
    for c in raw_campaigns:
        c['start'] = Timestamp.fromtimestampformat(c['start'])
        c['end'] = Timestamp.fromtimestampformat(c['end'])
    return raw_campaigns

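# Usage sketch for active_and_future_campaigns (an assumption, not from the
# source; only the 'start' and 'end' keys are grounded in the code above).
if __name__ == '__main__':
    for campaign in active_and_future_campaigns():
        print(campaign['start'], 'to', campaign['end'])
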
def get_ds_alert_hits(start_date: datetime.datetime,
                      end_date: datetime.datetime) -> Iterator[DsAlert]:
    # url = "https://en.wikipedia.org/w/api.php"
    params = {
        "action": "query",
        "list": "abuselog",
        "format": "json",
        "formatversion": 2,
        "aflstart": start_date.isoformat(),
        "aflend": end_date.isoformat(),
        "afldir": "newer",
        "aflfilter": 602,
        "afllimit": "max",
        "aflprop": "user|title|result|timestamp|details|revid",
        "continue": "",
    }
    for i in range(100):
        logger.debug(i)
        # res = session.get(url, params=params)
        # res.raise_for_status()
        # raw_data = res.json()
        req = Request(site=site, parameters=params, use_get=True)
        raw_data = req.submit()
        for hit in raw_data["query"]["abuselog"]:
            if hit["result"] == "tag":
                for alert in parse_alert_data(hit):
                    yield alert
        if raw_data.get("continue"):
            logger.debug(f"Continue: {raw_data['continue']}")
            params.update(raw_data["continue"])
        else:
            break
    else:
        # flask.abort(400)
        logger.warning("Too many API queries!")

def get_pages_categories(pagelist, site=DEFAULT_SITE, limit=500):
    """
    For every page from the list get its categories
    and return a {page: [categories]} dictionary.
    """
    result = dict.fromkeys(pagelist, [])
    kwargs = {"action": "query", "prop": "categories", "cllimit": "5000"}
    for idx in range(0, len(pagelist), limit):
        kwargs["titles"] = "|".join(pagelist[idx:idx + limit])
        request = Request(site=site, **kwargs)
        while True:
            answer = request.submit()
            # Wikipedia API can return the page list in non-canonical form!
            # At least when there are two possible canonical forms for one
            # namespace (for instance, "Участник" - "Участница" in Russian
            # Wikipedia). The query will normalize them and we need to map
            # the titles back.
            denormalize = {}
            if "normalized" in answer["query"]:
                for fix in answer["query"]["normalized"]:
                    denormalize[fix["to"]] = fix["from"]
            for value in answer["query"]["pages"].values():
                title = value["title"]
                if title in denormalize:
                    title = denormalize[title]
                if "categories" in value:
                    cats = [cat["title"] for cat in value["categories"]]
                    result[title] = result[title] + cats
            if "query-continue" in answer:
                request["clcontinue"] = answer["query-continue"]["categories"][
                    "clcontinue"]
                continue
            break
    return result

def get_summary(self, is_archive=False):
    summary = "[[Wikidata:Bots|Bot]]: %s %i request%s %s %s" % (
        "Archived" if is_archive else "Archiving",
        self.archive_count,
        "" if self.archive_count == 1 else "s",
        "from" if is_archive else "to",
        self.rfd_page.title(asLink=True) if is_archive
        else self.archive_page.title(asLink=True))
    if is_archive:
        return summary
    version = self.rfd_page.getVersionHistory(total=1)[0]
    user = version[2]
    params = {
        "action": "query",
        "list": "users",
        "ususers": user,
        "usprop": "groups"
    }
    user_groups = Request(**params).submit()["query"]["users"][0]["groups"]
    summary += " (last edit at %s by [[User:%s|]]%s%s" % (
        version[1], user,
        " (administrator)" if ("bot" not in user_groups
                               and "sysop" in user_groups) else "",
        ": '%s'" % version[3] if version[3] else "")
    return summary

def collect_info(site=DEFAULT_SITE):
    """
    Get the list of unreviewed files with additional information:
        "filename": title of file page
        "extension": in lowercase
        "filecats": file categories
        "pages": pages where the file is used
        "categories": categories of those pages
    """
    result = []

    # get filename and pages from api request
    def _submit_and_parse(request):
        """Divide answer into a list of values and continuation info."""
        answer = request.submit()
        values = list(answer["query"]["pages"].values())
        if "query-continue" in answer:
            contin = answer["query-continue"]
        else:
            contin = {}
        return (values, contin)

    kwargs = {
        "action": "query",
        "prop": "fileusage",
        "fulimit": "5000",
        "generator": "unreviewedpages",
        "gurnamespace": "6",
        "gurfilterredir": "nonredirects",
        "gurlimit": "5000"
    }
    while True:
        # iterate for gurstart, get list of files
        request = Request(site=site, **kwargs)
        (values, contin) = _submit_and_parse(request)
        chunk = [{"filename": value["title"], "pages": []} for value in values]
        while True:
            # iterate for fucontinue, get list of file users
            for key, value in enumerate(values):
                if "fileusage" in value:
                    chunk[key]["pages"] += [
                        usageinfo["title"] for usageinfo in value["fileusage"]
                    ]
            if "fileusage" in contin:
                request["fucontinue"] = contin["fileusage"]["fucontinue"]
                (values, contin) = _submit_and_parse(request)
                continue
            else:
                break
        result += chunk
        if "unreviewedpages" in contin:
            kwargs["gurstart"] = contin["unreviewedpages"]["gurstart"]
            continue
        else:
            break

    # collect additional info
    pagelist = [value["filename"] for value in result]
    for value in result:
        pagelist += value["pages"]
    pagelist = list(set(pagelist))
    catdict = get_pages_categories(pagelist, site=site)
    for value in result:
        if "." in value["filename"]:
            value["extension"] = re.match(
                r".*\.(.+)$", value["filename"]).group(1).lower()
        else:
            value["extension"] = ""
        categories = []
        for page in value["pages"]:
            categories += catdict[page]
        value["categories"] = set(categories)
        value["filecats"] = set(catdict[value["filename"]])
        value["pages"] = set(value["pages"])
    return result

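# Usage sketch for collect_info (an assumption, not from the source); the
# dictionary keys come from the docstring above.
if __name__ == '__main__':
    for info in collect_info():
        print(info["filename"], info["extension"], len(info["pages"]))
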
def __init__(self, *args, **kwargs):
    _original_Request.__init__(self, *args, **kwargs)

        AND log_timestamp < {}
        AND log_timestamp > {}
        AND log_title IN ({})
    GROUP BY log_title
'''.format(BASETIME.totimestampformat(),
           DAYS_400_AGO.totimestampformat(),
           ','.join(['%s'] * len(eligible_usernames))),
    eligible_usernames)
if args.debug:
    print('Found {} users with block log'.format(len(result)))
for row in result:
    user_name = row[0].decode().replace('_', ' ')
    r = Request(site=site, parameters={
        'action': 'query',
        'format': 'json',
        'list': 'logevents',
        'letype': 'block',
        'lestart': BASETIME.isoformat(),
        'leend': DAYS_400_AGO.isoformat(),
        # The source is redacted here ("******"); 'User:' + user_name is an
        # assumption based on the surrounding code.
        'letitle': 'User:' + user_name,
        'lelimit': 'max'
    })
    data = r.submit()
    unblocktime = None
    # print(user_name)
    for logevent in data['query']['logevents']:
        if logevent['action'] == 'unblock':
            unblocktime = pywikibot.Timestamp.fromISOformat(
                logevent['timestamp'])
        else:
            if 'expiry' in logevent['params']:
                expiry = pywikibot.Timestamp.fromISOformat(

import csv
import os
import traceback

os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from pywikibot.data.api import Request

os.environ['TZ'] = 'UTC'

site = pywikibot.Site()
site.login()
token = site.tokens['csrf']

with open("list-delete.csv", "r") as f:
    r = csv.reader(f)
    for row in r:
        pageid = row[2]
        oldtitle = row[1].strip()
        print("delete", oldtitle, pageid)
        try:
            data = Request(site=site, parameters={
                "action": "delete",
                "format": "json",
                "pageid": pageid,
                # Summary: "[[:phab:T187783]], delete lowercase redirect"
                "reason": "[[:phab:T187783]],刪除小寫重定向",
                "token": token
            }).submit()
        except Exception as e:
            traceback.print_exc()

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Distributed under the terms of MIT License (MIT)
import pywikibot
import time
from pywikibot.data.api import Request
import re

site = pywikibot.Site('fa', fam='wikipedia')
print("Fetching admins list")
data = Request(site=site, action="query", list="allusers",
               augroup="sysop", aulimit=500).submit()
adminsac = []
adminbots = ["Dexbot"]
adminsdiac = {}
for admin in data["query"]["allusers"]:
    admin = admin["name"]
    if admin in adminbots:
        continue
    acaction = []
    dcaction = []
    actions = "block, protect, rights, delete, upload, import, renameuser".split(
        ", ")
    for adminaction in actions:
        data1 = Request(site=site, action="query", list="logevents",
                        leuser=admin,

import os
import sys
from datetime import datetime, timedelta

import mwparserfromhell

os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from pywikibot.data.api import Request

site = pywikibot.Site()
site.login()

r = Request(site=site, parameters={
    "action": "query",
    "format": "json",
    "list": "logevents",
    "leaction": "rights/rights",
    "lelimit": "max"
})
data = r.submit()

# Table headers: Time / Admin / Days granted / Target / Reason
text = '''{| class="wikitable sortable"
|+
! 時間
! 管理員
! 授權天數
! 對象
! 理由
'''
for log in data['query']['logevents']:

print('postId', postId)
print('revisionId', revisionId)
oldtext = reply['content']['content']

# Tech News 2019-46: replace the bare template transclusion with the
# translated announcement text (Chinese).
newtext = oldtext.replace(
    "{{{{{|safesubst:}}}Technews-zh/2|2019|46}}",
    "維基媒體技術社群發出最新'''[[m:Special:MyLanguage/Tech/News|技術新聞]]'''。請告知其他用戶以下變更。當然，未必所有變更都會影響閣下。[[m:Special:MyLanguage/Tech/News/2019/46|翻譯本於此]]。")
if oldtext == newtext:
    print('no change')
    continue
pywikibot.showDiff(oldtext, newtext)
save = input('Save?')
if save.lower() in ['', 'y', 'yes']:
    data = {
        'action': 'flow',
        'submodule': 'edit-post',
        'page': topicid,
        'eppostId': postId,
        'epprev_revision': revisionId,
        'epcontent': newtext,
        'token': token,
        'format': 'json',
    }
    res = Request(site=site, parameters=data).submit()
    print(res)

def __init__(self, *args, **kwargs):
    """Initializer."""
    _original_Request.__init__(self, *args, **kwargs)

i += 1

cnt = 0
for row in pages:
    title = row[0].decode()
    titleparts = title.split('/')
    olduser = titleparts[0]
    subpage = '/'.join(titleparts[1:])
    page = pywikibot.Page(site, 'User talk:' + title)
    r = Request(site=site, parameters={
        'action': 'query',
        'format': 'json',
        'list': 'logevents',
        'utf8': 1,
        'leprop': 'ids|timestamp|details',
        'letype': 'renameuser',
        # The source is redacted here ("******"); 'User:' + olduser and the
        # following submit() call are assumptions based on the surrounding
        # code.
        'letitle': 'User:' + olduser,
    })
    data = r.submit()
    if len(data['query']['logevents']) != 1:
        if args.manual:
            for i in range(len(data['query']['logevents'])):
                print('{}. {}'.format(i, data['query']['logevents'][i]))
            while True:
                try:
                    idx = int(input('Pick a log: '))
                    logevent = data['query']['logevents'][idx]
                    break
                except Exception as e:

def create_simple(cls, **kwargs):
    """Skip CachedRequest implementation."""
    return _original_Request.create_simple(**kwargs)

flag = 0
# set=...內容 means the revision *content* was targeted.
if re.search(r'\|\s*set\s*=.*([編编][輯辑]|[刪删]除)?[內内]容', sectext):
    flag |= 1
    if args.debug:
        print('\tcontent')
# set=...摘要 means the edit *summary* was targeted.
if re.search(r'\|\s*set\s*=.*([編编][輯辑])?摘要', sectext):
    flag |= 2
    if args.debug:
        print('\tsummary')
if flag != 0:
    ids = re.findall(r'\|id\d+\s*=\s*(\d+)', sectext)
    if ids:
        data = Request(site=site, parameters={
            'action': 'query',
            'list': 'logevents',
            'leaction': 'delete/revision',
            'lelimit': '10',
            'letitle': title
        }).submit()
        deleted = 0
        admins = {}
        for logevent in data['query']['logevents']:
            logid = str(logevent['logid'])
            admin = logevent['user']
            if args.debug:
                print('\t', logevent)
            if (logevent['params']['type'] == 'revision'
                    and logevent['params']['new']['bitmask'] & flag == flag):
                for rvid in logevent['params']['ids']:
                    rvid = str(rvid)
                    if rvid in ids:

def __init__(self, *args, **kwargs):
    """Constructor."""
    _original_Request.__init__(self, *args, **kwargs)

def test_upload_object(self):
    """Test Request object prepared to upload."""
    req = Request(site=self.get_site(), action="upload",
                  file='MP_sounds.png', mime=True,
                  filename=os.path.join(_data_dir, 'MP_sounds.png'))
    self.assertEqual(req.mime, True)

for title in cfg['skip_templates']:
    for page in pywikibot.Page(site, title).getReferences(
            only_template_inclusion=True):
        skip_pages.add(page.title())

parameters = {
    "action": "query",
    "format": "json",
    "list": "querypage",
    "qppage": "UnconnectedPages",
    "qplimit": "max"
}
allpages = []
while True:
    print(parameters)
    r = Request(site=site, parameters=parameters)
    data = r.submit()
    for row in data['query']['querypage']['results']:
        allpages.append(row)
    del data['query']
    if 'query-continue' not in data:
        break
    for key in data['query-continue']['querypage']:
        parameters[key] = data['query-continue']['querypage'][key]

text_temp = collections.defaultdict(str)
for row in allpages:
    title = row['title']
    if title in skip_pages:
        continue

os.environ['TZ'] = 'UTC'

site = pywikibot.Site()
site.login()
token = site.tokens['csrf']

with open("list.csv", "r") as f:
    r = csv.reader(f)
    for row in r:
        oldtitle = row[1].strip()
        page = pywikibot.Page(site, oldtitle)
        ns = page.namespace().custom_name
        title = page.titleWithoutNamespace()
        title = title[0].upper() + title[1:]
        newtitle = ns + ":" + title
        print("move", oldtitle, "to", newtitle)
        try:
            data = Request(site=site, parameters={
                "action": "move",
                "format": "json",
                "fromid": row[0],
                "to": newtitle,
                # Summary: "[[:phab:T187783]], move to uppercase initial"
                "reason": "[[:phab:T187783]],移動到大寫開頭",
                "noredirect": 1,
                "token": token
            }).submit()
        except Exception as e:
            traceback.print_exc()

import csv
import os
import traceback

os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from pywikibot.data.api import Request

os.environ['TZ'] = 'UTC'

site = pywikibot.Site()
site.login()
token = site.tokens['csrf']

with open("list.csv", "r") as f:
    r = csv.reader(f)
    for row in r:
        oldtitle = row[0].strip()
        newtitle = row[1].strip()
        print("move", oldtitle, "to", newtitle)
        try:
            data = Request(site=site, parameters={
                "action": "move",
                "format": "json",
                "from": oldtitle,
                "to": newtitle,
                # Summary: "converted to a site-wide gadget"
                "reason": "轉為全站小工具",
                "noredirect": 1,
                "token": token
            }).submit()
        except Exception as e:
            traceback.print_exc()

for botname in cntbot:
    groups[botname] = []
    user = pywikibot.User(site, botname)
    if len(user.groups()) == 0:
        print(botname, 'is missing')
        continue
    if 'bot' in user.groups():
        groups[botname].append('有')  # "yes": has the local bot flag
    else:
        res = Request(site=site, parameters={
            "action": "query",
            "format": "json",
            "meta": "globaluserinfo",
            "guiuser": botname,
            "guiprop": "groups"
        }).submit()
        if 'global-bot' in res['query']['globaluserinfo']['groups']:
            groups[botname].append('全域')  # "global": global bot flag
    if 'sysop' in user.groups():
        groups[botname].append('管理')  # "admin"

def cmp(a, b):
    if a['bot'] == b['bot']:
        if a['operator'] == b['operator']:
            return -1 if a['idx'] < b['idx'] else 1
        return -1 if a['operator'] < b['operator'] else 1
