def main(): """ :return: None :rtype: None """ ics_url, file, wiki, debug = get_args() event_strings = [] past_events = [] if file: calendar = Calendar(deradicalise_ical(open(file).read())) else: ics_result = requests.get(ics_url) ics_result.encoding = 'utf-8' calendar = Calendar(deradicalise_ical(ics_result.text)) for event in sorted(calendar.events, key=lambda ev: ev.begin): event = EntropiaEvent(event) if not event.is_past_event: event_strings.append("\n" + LINE_SEPARATOR + str(event)) else: past_events.append(event) append_past_events(past_events, wiki['user'], wiki['pass'], wiki['archive']) termine = BOTWARNING + "\n" + TABLE_HEADER + "\n" + "".join( event_strings) + "\n" + "".join(TABLE_FOOTER) if debug: print(termine) site = Site('entropia.de', path='/') site.login(wiki['user'], wiki['pass']) page = site.pages[wiki['page']] if termine: page.save(termine, "Terminbot was here") page.purge()
def localblock(bot, trigger, username, password, Site):
    if trigger.account in stewards or trigger.account in cvt:
        options = trigger.group(2).split(" ")
        if len(options) == 2:
            wiki = options[0]
            target = options[1]
            site = Site(wiki + '.miraheze.org', clients_useragent=ua)
            site.login(username, password)
            result = site.api('query', http_method='POST', format='json',
                              meta='tokens')
            token = result['query']['tokens']['csrftoken']
            site.api('block', http_method='POST', format='json', user=target,
                     expiry='3 days', nocreate=1, autoblock=1, token=token)
        elif 2 < len(options) < 5:
            wiki = options[0]
            target = options[1]
            time = options[2]
            site = Site(wiki + '.miraheze.org', clients_useragent=ua)
            site.login(username, password)
            result = site.api('query', http_method='POST', format='json',
                              meta='tokens')
            token = result['query']['tokens']['csrftoken']
            site.api('block', http_method='POST', format='json', user=target,
                     expiry=time, nocreate=1, autoblock=1, token=token)
        else:
            bot.reply('Syntax is .block <wiki> <target> <time>', trigger.sender)
    else:
        if trigger.account == '':
            noaccount()
        else:
            bot.say('Access Denied: ' + trigger.account + ' (' + trigger.hostmask
                    + ') is not in the trusted list. This incident will be reported.',
                    trigger.sender)
            bot.say('Security Alert: ' + trigger.account + ' (' + trigger.hostmask
                    + ') attempted to use CVT on ' + trigger.sender,
                    '#ExamBot-logs')
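A note on the manual token round-trip above: mwclient can fetch the CSRF token itself via Site.get_token(), so the 'query'/meta='tokens' call can be collapsed. A minimal sketch, assuming the same site and target as inside localblock():

# Sketch only: the same block call, letting mwclient fetch the CSRF token.
token = site.get_token('csrf')
site.api('block', http_method='POST', format='json', user=target,
         expiry='3 days', nocreate=1, autoblock=1, token=token)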
def site_login():
    """
    Log in to the Semantic MediaWiki using a USERNAME and PASSWORD.

    Create a file called local.py with your credentials.
    """
    site = Site(('http', 'beta.floranorthamerica.org'))
    site.login(src.local.USERNAME, src.local.PASSWORD)
    return site
def main():
    parser = argparse.ArgumentParser(description='Datofeilfikser')
    parser.add_argument('--page', required=False,
                        help='Name of a single page to check')
    args = parser.parse_args()

    cnt = {'pagesChecked': 0, 'datesChecked': 0, 'datesModified': 0,
           'datesUnresolved': 0}
    pagesWithNoKnownErrors = []
    unresolved = []

    config = json.load(open('config.json', 'r'))
    site = Site('no.wikipedia.org')
    site.login(config['username'], config['password'])
    cat = site.Categories['Sider med kildemaler som inneholder datofeil']

    if args.page:
        page = site.pages[args.page]
        p = Page(page)
    else:
        n = 0
        for page in cat.members():
            n += 1
            logging.info('%02d %s - %.1f MB', n, page.name,
                         memory_usage_psutil())
            # print "-----------[ %s ]-----------" % page.name
            p = Page(page)
            cnt['pagesChecked'] += 1
            cnt['datesChecked'] += p.checked
            cnt['datesModified'] += len(p.modified)
            cnt['datesUnresolved'] += len(p.unresolved)
            if len(p.modified) == 0 and len(p.unresolved) == 0:
                pagesWithNoKnownErrors.append(page.name)
            unresolved.extend(p.unresolved)
            # if cnt['pagesChecked'] > 100:
            #     break

    # print
    # print "Pages with no known templates with date errors:"
    # for p in pagesWithNoKnownErrors:
    #     print ' - %s' % p

    cnt['datesOk'] = cnt['datesChecked'] - cnt['datesModified'] - cnt['datesUnresolved']

    unresolvedTxt = u"Pages checked: %(pagesChecked)d, dates checked: %(datesChecked)d, of which<br>\n" % cnt
    unresolvedTxt += " OK: %(datesOk)d, modified: %(datesModified)d, unresolved errors: %(datesUnresolved)d\n\n" % cnt
    unresolvedTxt += u'Unresolved errors:\n\n{|class="wikitable sortable"\n! Artikkel !! Felt !! Verdi\n|-\n'
    for p in unresolved:
        unresolvedTxt += u'| [[%(page)s]] || %(key)s || <nowiki>%(value)s</nowiki>\n|-\n' % p

    page = site.pages[u'Bruker:DanmicholoBot/Datofiks/Uløst']
    page.save(unresolvedTxt, summary='Oppdaterer')
def main():
    # Generate credentials at https://dota2.gamepedia.com/Special:BotPasswords
    username = '******'
    password = '******'
    dotawiki = Site('dota2.gamepedia.com', path='/',
                    clients_useragent='MePsyDuckDota2WikiEditingBot/1.0')
    dotawiki.login(username=username, password=password)
    process(dotawiki)
def login(host: str, path: str, scheme: str, username='', password='') -> \
        ClassVar:
    '''
    Logs in to the wiki. Creates the global var site (an instance of
    mwclient.Site), through which writing to the wiki is possible.
    '''
    site_ = Site(host=host, path=path, scheme=scheme)
    if username and password:
        site_.login(username=username, password=password)
    global site
    site = site_
    return site_
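A quick usage example for the helper above (hypothetical wiki and credentials; the global site it sets is what later code writes through):

# Usage sketch (hypothetical host/credentials):
login(host='en.wikipedia.org', path='/w/', scheme='https',
      username='ExampleBot', password='botpassword')
print(site.pages['Sandbox'].text())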
def append_past_events(past_events, wiki_user, wiki_pw, wiki_archive):
    """
    Append the "new" past events to the wiki archive page

    :param past_events: the past events that were not added to the events page
    :type past_events: list
    :param wiki_user: bot user for the wiki
    :type wiki_user: str
    :param wiki_pw: password for the wiki user
    :type wiki_pw: str
    :param wiki_archive: archive page
    :type wiki_archive: str
    :return: None
    :rtype: None
    """
    site = Site('entropia.de', path='/')
    site.login(wiki_user, wiki_pw)
    page = site.pages[wiki_archive]
    text = page.text().split('\n')
    last_table_position = 0
    for event in past_events:
        year_header = "== {} ==".format(event.endtime.strftime('%Y'))
        for index, txtline in enumerate(text):
            if txtline == '|}':
                last_table_position = index
        if str(event) in text:
            continue
        if year_header in text:
            append_list = ('\n' + LINE_SEPARATOR + str(event))
            text = (text[:last_table_position] + [append_list, ]
                    + text[last_table_position:])
        else:
            append_list = (3 * '\n' + year_header + ARCHIVE_TABLE_HEADER + '\n'
                           + LINE_SEPARATOR + '\n' + str(event) + '\n|}')
            text = (text[:last_table_position + 1] + [append_list, ]
                    + text[last_table_position + 1:])
    page.save("\n".join(text))
def addToWiki(name, text):
    site = Site(cfg.wikiurl, path="/")
    site.login(cfg.wikiuser, cfg.wikipassword)
    page = site.pages[name]
    # Convert HTML anchors into wiki-style external links
    patternstart = r'<(a).*?>'
    result = re.sub(patternstart, "[", text)
    patternend = r'<(/a).*?>'
    result = re.sub(patternend, "]", result)
    search_results = re.finditer(r'\[.*?\]', result)
    for item in search_results:
        rep = str(item.group(0)).lstrip('[').rstrip(']').replace("/", "/")
        result = result.replace(str(item.group(0)), "[" + rep + " " + rep + "]")
    # str.replace returns a new string, so the result must be reassigned
    result = result.replace("<!DOCTYPE HTML><html><body>", "").replace(
        "</body></html>", "") + "[[Kategorie:" + cfg.wikiKategorie + "]]"
    write_Plenum(name, result)
    page.save(text=result, summary="Neuanlage")
class website_connection:
    ua = "MultiWikiTool run by User:******"  # user name masked in the source

    # Best-guess reconstruction: credential masking garbled this constructor
    # in the scraped source; the error message and attributes are original.
    def __init__(self, wikipath, page_name, username, password):
        self.website = Site(wikipath, clients_useragent=self.ua)
        try:
            self.website.login(username, password)
        except LoginError:
            print("Incorrect login info")
            # throw and catch error in parent block
            raise IOError('Incorrect login info for ' + wikipath)
        self.page = self.website.pages[page_name]

    # gets the text in the page, returns empty string if page does not exist
    def get_text(self):
        return self.page.text()

    def save_text(self, text, edit_summary):
        self.page.save(text, edit_summary)
def start(self, job: Job):
    import sys

    args, config, logger, lang = self.__init_system()
    logger.info(lang.t("main.starting_bot"))

    client = Site(config["wiki"]["api"])
    logger.info(lang.t("main.logging_in").format(name=config["wiki"]["name"]))
    try:
        client.user = args.user
        client.password = args.password
        client.login(args.user, args.password)
        logger.info(lang.t("main.logged_in").format(user=args.user))
    except LoginError:
        logger.error(lang.t("main.wrong_credentials"))
        sys.exit(2)
    except MaximumRetriesExceeded:
        logger.error(lang.t("main.maximum_retries"))
        sys.exit(2)
    except APIError:
        logger.error(lang.t("main.api_error"))
        sys.exit(2)

    job.bootstrap(client, logger, args.tasks, args.password)
    job.run()
cookies_file_en = '/data/project/deltaquad-bots/stewie-en.txt'
cookies_file_meta = '/data/project/deltaquad-bots/stewie-meta.txt'

# en login
cookie_jar_en = MozillaCookieJar(cookies_file_en)
if os.path.exists(cookies_file_en):
    # Load cookies from file, including session cookies (expirydate=0)
    cookie_jar_en.load(ignore_discard=True, ignore_expires=True)

connection = requests.Session()
connection.cookies = cookie_jar_en  # Tell Requests session to use the cookiejar.

enwiki = Site('en.wikipedia.org', pool=connection)
if not enwiki.logged_in:
    enwiki.login(login.username, login.password)
    # Save cookies to file, including session cookies (expirydate=0)
    cookie_jar_en.save(ignore_discard=True, ignore_expires=True)

###############
# meta login
cookie_jar_meta = MozillaCookieJar(cookies_file_meta)
if os.path.exists(cookies_file_meta):
    # Load cookies from file, including session cookies (expirydate=0)
    cookie_jar_meta.load(ignore_discard=True, ignore_expires=True)

connection = requests.Session()
connection.cookies = cookie_jar_meta  # Tell Requests session to use the cookiejar.
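The meta-wiki block is cut off in the source; it presumably mirrors the en.wikipedia block above. A sketch of the likely continuation (an assumption, not recovered code):

# Assumed continuation, mirroring the en.wikipedia login above:
metawiki = Site('meta.wikimedia.org', pool=connection)
if not metawiki.logged_in:
    metawiki.login(login.username, login.password)
    # Save cookies to file, including session cookies (expirydate=0)
    cookie_jar_meta.save(ignore_discard=True, ignore_expires=True)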
def download(dl, args):
    url = urlparse(dl)
    if url.netloc:
        filename = url.path
        site_name = url.netloc
        if args.site is not DEFAULT_SITE and not args.quiet:
            # this will work even if the user specifies 'commons.wikimedia.org'
            print('Warning: target is a URL, '
                  'ignoring site specified with --site')
    else:
        filename = dl
        site_name = args.site

    file_match = valid_file(filename)  # check if this is a valid file
    if file_match and file_match.group(1):
        # has File:/Image: prefix and extension
        filename = file_match.group(2)
    else:
        # no file extension and/or prefix, probably an article
        print("Could not parse input '{}' as a file.".format(filename))
        sys.exit(1)

    filename = unquote(filename)  # remove URL encoding for special characters
    dest = args.output or filename

    if args.verbose >= 2:
        print('User agent: {}'.format(USER_AGENT))

    # connect to site and identify ourselves
    if args.verbose >= 1:
        print('Site name: {}'.format(site_name))
    try:
        site = Site(site_name, path=args.path, clients_useragent=USER_AGENT)
        if args.username and args.password:
            site.login(args.username, args.password)
    except ConnectionError as e:
        # usually this means there is no such site, or there's no network
        # connection, though it could be a certificate problem
        print("Error: couldn't connect to specified site.")
        if args.verbose >= 2:
            print('Full error message:')
            print(e)
        sys.exit(1)
    except HTTPError as e:
        # most likely a 403 forbidden or 404 not found error for api.php
        print("Error: couldn't find the specified wiki's api.php. "
              "Check the value of --path.")
        if args.verbose >= 2:
            print('Full error message:')
            print(e)
        sys.exit(1)
    except (InvalidResponse, LoginError) as e:
        # InvalidResponse: site exists, but we couldn't communicate with the
        # API endpoint for some reason other than an HTTP error.
        # LoginError: missing or invalid credentials
        print(e)
        sys.exit(1)

    # get info about the target file
    try:
        file = site.images[filename]
    except APIError as e:
        # an API error at this point likely means access is denied,
        # which could happen with a private wiki
        print('Error: access denied. Try providing credentials with '
              '--username and --password.')
        if args.verbose >= 2:
            print('Full error message:')
            for i in e.args:
                print(i)
        sys.exit(1)

    if file.imageinfo != {}:
        # file exists either locally or at a common repository,
        # like Wikimedia Commons
        file_url = file.imageinfo['url']
        file_size = file.imageinfo['size']
        file_sha1 = file.imageinfo['sha1']

        if args.verbose >= 1:
            print("Info: downloading '{}' "
                  "({} bytes) from {}".format(filename, file_size, site.host),
                  end='')
            if args.output:
                print(" to '{}'".format(dest))
            else:
                print('\n', end='')
            print('Info: {}'.format(file_url))

        if os.path.isfile(dest) and not args.force:
            print("File '{}' already exists, skipping download "
                  "(use -f to ignore)".format(dest))
        else:
            try:
                fd = open(dest, 'wb')
            except IOError as e:
                print('File could not be written. '
                      'The following error was encountered:')
                print(e)
                sys.exit(1)
            else:
                # download the file(s)
                if args.verbose >= 1:
                    leave_bars = True
                else:
                    leave_bars = False

                with tqdm(leave=leave_bars, total=file_size,
                          unit='B', unit_scale=True,
                          unit_divisor=CHUNKSIZE) as progress_bar:
                    with fd:
                        res = site.connection.get(file_url, stream=True)
                        progress_bar.set_postfix(file=dest, refresh=False)
                        for chunk in res.iter_content(CHUNKSIZE):
                            fd.write(chunk)
                            progress_bar.update(len(chunk))

        # verify file integrity and optionally print details
        dl_sha1 = verify_hash(dest)
        if args.verbose >= 1:
            print('Info: downloaded file SHA1 is {}'.format(dl_sha1))
            print('Info: server file SHA1 is {}'.format(file_sha1))
        if dl_sha1 == file_sha1:
            if args.verbose >= 1:
                print('Info: hashes match!')
            # at this point, we've successfully downloaded the file
        else:
            print('Error: hash mismatch! Downloaded file may be corrupt.')
            sys.exit(1)
    else:
        # no file information returned
        print("Target '{}' does not appear to be a valid file.".format(filename))
        sys.exit(1)
try:
    from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
    if "WDUSER" in os.environ and "WDPASS" in os.environ:
        WDUSER = os.environ['WDUSER']
        WDPASS = os.environ['WDPASS']
    else:
        raise ValueError(
            "WDUSER and WDPASS must be specified in local.py or as environment variables"
        )

CACHE_SIZE = 99999
CACHE_TIMEOUT_SEC = 300  # 5 min

site = Site(('https', 'www.wikidata.org'))
site.login(WDUSER, WDPASS)


def chunks(iterable, size):
    it = iter(iterable)
    item = list(islice(it, size))
    while item:
        yield item
        item = list(islice(it, size))


@cached(TTLCache(CACHE_SIZE, CACHE_TIMEOUT_SEC))
def getConceptLabels(qids):
    qids = "|".join({
        qid.replace("wd:", "") if qid.startswith("wd:") else qid
        for qid in qids
    })
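A quick illustration of the chunks() generator defined above (hypothetical input):

list(chunks(range(5), 2))  # -> [[0, 1], [2, 3], [4]]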
        sys.exit(1)

    registrant = 'NONE'
    return (registrant, target)

#
# Main
#

userinfo = getUserInfo(BOTINFO)
email = getEmail(EMAILINFO)

try:
    site = Site('en.wikipedia.org')
    site.login(userinfo['username'], userinfo['password'])
except Exception:
    traceback.print_exc()
    sys.exit(1)

crossref = queryCrossref(email, APIMEMBERS, APIPREFIXES, BLOCKSIZE)

filename = (os.environ['WIKI_WORKING_DIR'] + '/Dois/doi-registrants-'
            + date.today().strftime('%Y%m%d'))
file = open(filename, 'w', 1)

print('Retrieving Wikipedia data ...')

for order in tqdm(sorted(crossref, key=int), leave=None):
sorted_values = list(plants.values())
sorted_values.sort(key=sortFunc)

sorted_plants = {}
for i in sorted_values:
    for k in plants.keys():
        if plants[k] == i:
            sorted_plants[k] = plants[k]
            plants.pop(k)
            break

for pl in sorted_plants.keys():
    page_txt += "|-\n| {} || {}\n".format(data["plants"][pl]["name"],
                                          sorted_plants[pl])
page_txt += "|}\n"

# Output
with open("biomes.txt", "w") as f:
    f.write(page_txt)

ua = "RealisticBiomes/0.0.1 Smal"
site = Site('civwiki.org', clients_useragent=ua)
page = site.pages['Template:RealisticBiomesConfig (CivClassic 2.0)']
text = page.text()
# text += page_txt
site.login(USER, PASSWORD)
page.edit(page_txt, "Automated Data Update")
print(page.text())
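The nested-loop sort-by-value above can be written more directly; a sketch, assuming sortFunc is the same key function over the values:

sorted_plants = dict(sorted(plants.items(), key=lambda kv: sortFunc(kv[1])))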
def main(argv):
    # ------------- Constant Variables ----------------
    MERGE = True
    WORLD_AREA = math.pi * (13000 * 13000)
    MODE = "OFFLINE"
    DATA_URL = "https://githubraw.com/ccmap/data/master/land_claims.civmap.json"
    SANDBOX = False
    # --------------------------------------------------

    try:
        opts, args = getopt.getopt(
            argv, "h", ["markdown", "wiki", "offline", "sandbox", "help"])
    except getopt.GetoptError:
        print("areaCalculator.py --wiki")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print("--markdown , --wiki , --offline , --sandbox , --help")
        if opt == "--markdown":
            MODE = "MARKDOWN"
        if opt == "--wiki":
            MODE = "WIKI"
        if opt == "--offline":
            MODE = "OFFLINE"
        if opt == "--sandbox":
            MODE = "WIKI"
            SANDBOX = True

    # Get the latest claims json
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
    }
    req = urllib.request.Request(url=DATA_URL, headers=headers)
    with urllib.request.urlopen(req) as url:
        data = json.loads(url.read().decode())

    # Calculate and sort the area of every polygon, combining ones
    # from the same nation
    areas = {}
    shortnames = {}
    for feat in data["features"]:
        name = feat["name"]
        if MERGE:
            nation = re.sub(
                r"\(|\)", "",
                re.search(r"(^[^()]+$)|\((.*)\)",
                          name.replace("\n", " ")).group())
            if "shortname" in feat:
                shortnames[nation] = feat["shortname"]
            if ACRONYMS.get(nation) is not None:
                nation = ACRONYMS.get(nation)
        else:
            nation = name
        area = 0
        if "polygon" in feat:
            for poly in feat["polygon"]:
                area += polygon_area(poly)
        else:
            print(feat)
        if nation in areas:
            areas[nation] += area
        else:
            areas[nation] = area

    areas_sorted = {}
    areas_sorted_keys = sorted(areas, key=areas.get, reverse=True)
    for w in areas_sorted_keys:
        areas_sorted[w] = areas[w]

    # Render the table
    if MODE == "MARKDOWN":
        with open('areas.md', 'w') as f:
            f.write("#|Nation|Area (km²)|% of Map Area\n")
            f.write(":---:|:---:|:---:|:---:|\n")
            f.write("{}|{}|{}|{}\n".format(0, "*CivClassic*",
                                           round(WORLD_AREA / 1000000, 3), 100))
            i = 1
            for key in areas_sorted.keys():
                are = round(areas[key] / 1000000, 3)
                per = round((areas[key] / WORLD_AREA) * 100, 3)
                print(key, are)
                f.write("{}|{}|{}|{}\n".format(i, key, are, per))
                i = i + 1

    if MODE == "WIKI" or MODE == "OFFLINE":
        # Get all countries with a flag template
        flag_template_whitelist = []
        ua = "AreaListCalculator/0.0.1 Smal"
        site = Site('civwiki.org', clients_useragent=ua)
        category = site.categories['All country data templates']
        for page in category:
            flag_template_whitelist.append(
                page.name[len("Template:Country data") + 1:])

        # Generate the wiki table
        new_table = ""
        new_table += "{| class=\"wikitable sortable\"\n|+\n!Rank\n!Nation\n!Area in km²\n!% of Map Area\n|-\n"
        new_table += "|-\n|{}\n|{}\n|{}\n|{}\n".format(
            0, "''[[CivClassic]]''", round(WORLD_AREA / 1000000, 3), 100)
        i = 1
        for key in areas_sorted.keys():
            are = round(areas[key] / 1000000, 3)
            per = round((areas[key] / WORLD_AREA) * 100, 3)
            # print(key, are)
            nation_txt = "[[{}]]".format(key)
            if key in flag_template_whitelist:
                nation_txt = "{{{{flag|{}}}}}".format(key)
            elif key in shortnames:
                if shortnames[key] in flag_template_whitelist:
                    nation_txt = "{{{{flag|{}}}}}".format(shortnames[key])
            new_table += "|-\n|{}\n|{}\n|{}\n|{}\n".format(i, nation_txt, are, per)
            i = i + 1
        new_table += "|}"

        # Upload the table to civwiki
        if SANDBOX == False:
            page = site.pages['List_of_nations_by_area']
        else:
            page = site.pages['List_of_nations_by_area/Sandbox']
        text = page.text()
        parsed = wtp.parse(text)
        for section in parsed.sections:
            if section.title == "Nations by area":
                section.contents = new_table
        print(parsed.string)

        if MODE == "OFFLINE":
            with open('areas.txt', 'w') as f:
                f.write(parsed.string)
        else:
            site.login(USER, PASSWORD)
            page.edit(parsed.string, "Automated Table Update")
def main(argv):
    # ------------- Constant Variables ----------------
    MERGE = True
    WORLD_AREA = math.pi * (13000 * 13000)
    MODE = os.environ.get('MODE', 'OFFLINE')
    SANDBOX = False
    USER = os.environ.get('USERNAME', '')
    PASSWORD = os.environ.get('PASSWORD', '')
    # --------------------------------------------------
    if MODE == "SANDBOX":
        SANDBOX = True
        MODE = "WIKI"
    # ["MARKDOWN", "WIKI", "OFFLINE", "SANDBOX", "USERNAME", "PASSWORD"]

    # Get the latest claims json
    with open('land_claims.civmap.json', 'r') as file:
        data = json.loads(file.read())

    # Calculate and sort the area of every polygon, combining ones
    # from the same nation
    owner = {}
    areas_t = {}
    areas = {}
    shortnames = {}
    for feat in data["features"]:
        name = feat["name"]
        territory = feat["name"]
        if MERGE:
            nation = re.sub(
                r"\(|\)", "",
                re.search(r"(^[^()]+$)|\((.*)\)",
                          name.replace("\n", " ")).group())
            territory = name.replace("\n", " ").strip()
            if "shortname" in feat:
                shortnames[nation] = feat["shortname"]
            if ACRONYMS.get(nation) is not None:
                nation = ACRONYMS.get(nation)
            if ACRONYMS.get(territory) is not None:
                territory = ACRONYMS.get(territory)
        else:
            nation = name
        if territory[0] == "(":  # Handle specific edge cases
            territory = nation
        owner[territory] = nation
        area = 0
        if "polygon" in feat:
            for poly in feat["polygon"]:
                area += polygon_area(poly)
        # Nation
        for nat in nation.split('/'):
            if nat in areas:
                areas[nat] += area
            else:
                areas[nat] = area
        # Territories
        if territory in areas_t:
            areas_t[territory] += area
        else:
            areas_t[territory] = area

    areas_sorted = {}
    areas_sorted_keys = sorted(areas, key=areas.get, reverse=True)
    for w in areas_sorted_keys:
        areas_sorted[w] = areas[w]

    areas_t_sorted = {}
    areas_t_sorted_keys = sorted(areas_t, key=areas_t.get, reverse=True)
    for w in areas_t_sorted_keys:
        areas_t_sorted[w] = areas_t[w]

    # Render the table
    if MODE == "MARKDOWN":
        with open('areas.md', 'w') as f:
            f.write("#|Nation|Area (km²)|% of Map Area\n")
            f.write(":---:|:---:|:---:|:---:|\n")
            f.write("{}|{}|{}|{}\n".format(0, "*CivClassic*",
                                           round(WORLD_AREA / 1000000, 3), 100))
            i = 1
            for key in areas_sorted.keys():
                are = round(areas[key] / 1000000, 3)
                per = round((areas[key] / WORLD_AREA) * 100, 3)
                # print(key, are)
                f.write("{}|{}|{}|{}\n".format(i, key, are, per))
                i = i + 1

    if MODE == "WIKI" or MODE == "OFFLINE":
        # Get all countries with a flag template
        flag_template_whitelist = []
        ua = "AreaListCalculator/0.0.1 Smal"
        site = Site('civwiki.org', clients_useragent=ua)
        category = site.categories['All country data templates']
        for page in category:
            flag_template_whitelist.append(
                page.name[len("Template:Country data") + 1:])

        # ------------------------------------------
        # Generate the nation table
        #
        nation_table = ""
        nation_table += "{| class=\"wikitable sortable\"\n|+\n!Rank\n!Nation\n!Area in km²\n!% of Map Area\n|-\n"
        nation_table += "|-\n|{}\n|{}\n|{}\n|{}\n".format(
            0, "''[[CivClassic]]''", round(WORLD_AREA / 1000000, 3), 100)
        i = 1
        for key in areas_sorted.keys():
            are = round(areas[key] / 1000000, 3)
            per = round((areas[key] / WORLD_AREA) * 100, 3)
            # print(key, are)
            nation_txt = "[[{}]]".format(key)
            if key in flag_template_whitelist or key in FLAG_REDIRECTS:
                nation_txt = "{{{{flag|{}}}}}".format(key)
            elif key in shortnames:
                if shortnames[key] in flag_template_whitelist:
                    nation_txt = "{{{{flag|{}}}}}".format(shortnames[key])
            nation_table += "|-\n|{}\n|{}\n|{}\n|{}\n".format(i, nation_txt, are, per)
            i = i + 1
        nation_table += "|}\n"

        # ------------------------------------------------
        # Generate the territory table
        #
        territory_table = ""
        territory_table += "{| class=\"wikitable sortable\"\n|+\n!Rank\n!Territory\n!Area in km²\n!% of Map Area\n|-\n"
        territory_table += "|-\n|{}\n|{}\n|{}\n|{}\n".format(
            0, "''[[CivClassic]]''", round(WORLD_AREA / 1000000, 3), 100)
        i = 1
        for key in areas_t_sorted.keys():
            are = round(areas_t[key] / 1000000, 3)
            per = round((areas_t[key] / WORLD_AREA) * 100, 3)
            territory = re.sub(r"\(.*?\)", "", key).strip()
            nation = owner[key]
            # print(territory, nation)
            territory_txt = key
            if territory == nation:
                territory_txt = "[[{}]]".format(key)
                if key in flag_template_whitelist or key in FLAG_REDIRECTS:
                    territory_txt = "{{{{flag|{}}}}}".format(key)
                elif key in shortnames:
                    if shortnames[key] in flag_template_whitelist:
                        territory_txt = "{{{{flag|{}}}}}".format(shortnames[key])
            else:
                # This is a territory
                flag = ""
                # Check if the territory has a flag of its own
                if territory in flag_template_whitelist or territory in FLAG_REDIRECTS:
                    flag = "{{{{flagicon|{}}}}} ".format(territory)
                # -- disabled code that would've placed the parent nation's
                # flag if the territory lacked its own
                # else:
                #     if nation in flag_template_whitelist or nation in FLAG_REDIRECTS:
                #         flag = "{{{{flagicon|{}}}}} ".format(nation)
                #     elif nation in shortnames:
                #         if shortnames[nation] in flag_template_whitelist:
                #             flag = "{{{{flagicon|{}}}}} ".format(shortnames[nation])
                territory_txt = flag + "[[{}|{}]]".format(territory, key)
            territory_table += "|-\n|{}\n|{}\n|{}\n|{}\n".format(i, territory_txt, are, per)
            i = i + 1
        territory_table += "|}\n"
        # print(territory_table)

        # ---------------------------------------------------
        # Upload the table to civwiki
        #
        if SANDBOX == False:
            page = site.pages['List_of_nations_by_area (CivClassic)']
        else:
            page = site.pages['List_of_nations_by_area (CivClassic)/Sandbox']
        text = page.text()
        parsed = wtp.parse(text)
        # print(parsed.pformat())
        for section in parsed.sections:
            if section.title == "Nations by area":
                section.contents = nation_table
            if section.title == "Territories by area":
                section.contents = territory_table

        if MODE == "OFFLINE":
            with open('areas.txt', 'w') as f:
                f.write(parsed.string)
        else:
            site.login(USER, PASSWORD)
            if text != parsed.string:  # compare the saved text, not the Page object
                page.edit(parsed.string, "Automated Table Update")
import sys

from mwclient import Site

import scrapContent as scrap

url = "sindhipedia.org"
user_name = 'Administrator'
password = '******'
page_name = sys.argv[1]

site = Site(('http', url), path='/')
site.login(user_name, password)
page = site.pages[page_name]

if sys.argv[2] == '-d':
    print('Deleting Page !', sys.argv[1])
    page.delete()
    sys.exit()

if page.exists:
    print('Page', sys.argv[1], 'already exists')
    sys.exit()
else:
    print("Creating Page", sys.argv[1])
    print(page.can('edit'))
    # result comes in sections, so you have to define textspreadratio
    text = scrap.scrapDynamic(sys.argv[1], 5)
    # print("Generator Output:", text)
    page.save(text, 'Edit Summary')
    print('Created Page', sys.argv[1], '!!')
def main(argv):
    # ------------- Constant Variables ----------------
    DATA_URL = "https://raw.githubusercontent.com/CivClassic/AnsibleSetup/master/templates/public/plugins/FactoryMod/config.yml.j2"
    server_name = "CivClassic 2.0"
    MODE = "NONE"
    # --------------------------------------------------

    try:
        opts, args = getopt.getopt(
            argv, "h", ["markdown", "wiki", "offline", "sandbox", "help"])
    except getopt.GetoptError:
        print("factoryMod.py --wiki")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print("--wiki , --offline , --help")
        if opt == "--wiki":
            MODE = "WIKI"
        if opt == "--offline":
            MODE = "OFFLINE"
        if opt == "--none":
            MODE = "NONE"

    # Get the latest config yaml
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
    }
    req = urllib.request.Request(url=DATA_URL, headers=headers)

    page_txt = ""
    # page_txt = "= Realistic Biomes Growth Rates =\n"

    # Download the config
    data = []
    with urllib.request.urlopen(req) as url:
        # safe_load: yaml.load() without a Loader fails on modern PyYAML
        data = yaml.safe_load(url.read().decode())
    # print(data)
    print("Config Downloaded, parsing data...")

    default_fuel_interval = data['default_fuel_consumption_intervall']
    default_fuel = parseMaterials(data['default_fuel'])
    # default_fuel = ""  # Everything in CivClassic uses the same fuel, so clearing for readability

    factories = []
    for factory in data['factories']:
        n_factory = {}
        fac = data['factories'][factory]
        print(fac['name'])
        n_factory['name'] = fac['name']
        n_factory['recipes'] = []
        n_factory['repair'] = []
        n_factory['tables'] = {}
        # Get Setup Cost
        if fac['type'] == 'FCC':
            n_factory['setup'] = parseMaterials(fac['setupcost'])
            # print(n_factory['setup'])
        else:
            print("type", fac['type'])
        for recipe in fac['recipes']:
            rec = data['recipes'][recipe]
            name = rec['name']
            r_in = ""
            r_out = ""
            time = rec['production_time']
            fuel = ""
            if rec['type'] == 'PRODUCTION':
                r_in = parseMaterials(rec['input'])
                r_out = parseMaterials(rec['output'])
                # print("{} | {} -> {}".format(rec['name'], r_in, r_out))
            elif rec['type'] == 'REPAIR':
                r_in = parseMaterials(rec['input'])
                r_out = "+{} health".format(rec['health_gained'])
                # print("Repair: {}".format(r_in))
            elif rec['type'] == 'RANDOM':
                r_in = parseMaterials(rec['input'])
                r_out = "(Random Item)"
                table = []
                # Parse the loot table here
                for output in rec['outputs']:
                    roll = {}
                    poss = rec['outputs'][output]
                    roll['name'] = clean_name(output)
                    roll['chance'] = poss['chance'] * 100
                    roll['item'] = parseMaterials(rec['outputs'][output])
                    table.append(roll)
                n_factory['tables'][name] = table
            elif rec['type'] == 'UPGRADE':
                r_in = parseMaterials(rec['input'])
                r_out = "Convert factory to {}".format(rec['factory'])
            elif rec['type'] == 'COMPACT':
                r_in = "1 Crate, (Stack to Compact)"
                r_out = "(Compacted Stack)"
            elif rec['type'] == 'DECOMPACT':
                r_in = "(Compacted Stack)"
                r_out = "(Decompacted Stack)"
            elif rec['type'] == 'WORDBANK':
                pass
            elif rec['type'] == 'PRINTBOOK':
                r_in = parseMaterials(rec['input']) + ", 1 Oak Trapdoor (Printing Plate)"
                r_out = "{} Printed Books".format(rec['outputamount'])
            elif rec['type'] == 'PRINTINGPLATE':
                r_in = parseMaterials(rec['input']) + ", 1 Written Book"
                r_out = parseMaterials(rec['output']) + " (Printing Plate)"
            elif rec['type'] == 'PRINTINGPLATEJSON':
                r_in = parseMaterials(rec['input']) + ", 1 Written Book"
                r_out = parseMaterials(rec['output']) + " (Printing Plate)"
            elif rec['type'] == 'PRINTNOTE':
                r_in = "{}, {}".format(parseMaterials(rec['input']),
                                       '1 Oak Trapdoor (Printing Plate)')
                r_out = "{} {}".format(rec['outputamount'], rec['title'])
            else:
                print(rec['type'], rec['name'])

            # Fuel math
            interval = default_fuel_interval
            if 'fuel_consumption_intervall' in rec:
                interval = rec['fuel_consumption_intervall']
            fuel_type = default_fuel
            if 'fuel' in rec:
                fuel_type = rec['fuel']
            fuel = "{} {}".format(
                int(float(time[:-1]) / float(interval[:-1])), fuel_type)

            parsed_rec = {
                "name": name,
                "input": r_in,
                "output": r_out,
                "time": time,
                "fuel": fuel
            }
            if rec['type'] != 'REPAIR':
                n_factory['recipes'].append(parsed_rec)
            else:
                n_factory['repair'].append(parsed_rec)
            # print(name, r_in, r_out, time, fuel)
        factories.append(n_factory)
        # print(n_factory)

    '''
    if MODE == "MARKDOWN":
        txt = ""
        for fac in factories:
            txt += "#### {}\n".format(fac['name'])
            if 'setup' in fac:
                txt += "**Setup Cost**: {}\n".format(fac['setup'])
            txt += "| Recipe | Input | Output | Time | Fuel |\n"
            txt += "| --- | --- | --- | --- | --- |\n"
            for rec in fac['recipes']:
                txt += "| {} | {} | {} | {} | {} |\n".format(
                    rec['name'], rec['input'], rec['output'],
                    rec['time'], rec['fuel'])
            txt += "| *{}* | {} | *{}* | {} | {} |\n".format(
                fac['repair']['name'], fac['repair']['input'],
                fac['repair']['output'], fac['repair']['time'],
                fac['repair']['fuel'])
            txt += "\n"
        with open("factories.md", "w") as f:
            f.write(txt)
    '''

    if MODE == "WIKI" or MODE == "OFFLINE":
        print("Writing tables...")
        fac_txt = {}
        ua = "FactoryMod/0.0.1 Smal"
        site = Site('civwiki.org', clients_useragent=ua)
        site.login(USER, PASSWORD)
        if not os.path.exists('preview'):
            os.makedirs('preview')
        for fac in factories:
            # Generate Factory Table
            txt = ""
            txt += "{|class=\"wikitable mw-collapsible\"\n"
            txt += "|+ class=\"nowrap\" |{}\n".format(fac['name'])
            txt += "! Recipe !! Input !! Output !! Time !! Fuel \n"
            for rec in fac['recipes']:
                txt += "|-\n| {} || {} || {} || {} || {} \n".format(
                    rec['name'], rec['input'], rec['output'],
                    rec['time'], rec['fuel'])
            for rep in fac['repair']:
                txt += "|-\n| ''{}'' || {} || ''{}'' || {} || {} \n".format(
                    rep['name'], rep['input'], rep['output'],
                    rep['time'], rep['fuel'])
            if 'setup' in fac:
                txt += "|-\n| '''Create Factory''' || colspan=\"4\"| {}\n".format(fac['setup'])
            txt += "|}"
            # Generate Additional Tables
            for table_key in fac['tables']:
                txt += "\n{|class=\"wikitable mw-collapsible mw-collapsed sortable\"\n"
                txt += "|+ class=\"nowrap\" |{}\n".format(table_key)
                txt += "! Name !! Item !! Chance\n"
                for item in fac['tables'][table_key]:
                    txt += "|-\n"
                    txt += "| {} || {} || align=\"right\"| {}\n".format(
                        item['name'], item['item'], round(item['chance'], 5))
                txt += "|}"
            page_title = 'Template:FactoryModConfig {} ({})'.format(
                fac['name'], server_name)
            with open('preview/{}.txt'.format(page_title), 'w') as f:
                f.write(txt)
            page = site.pages[page_title]
            if page.text() != txt:
                print("DIFF", page_title)
                if MODE == "WIKI":
                    page.edit(txt, "Automated Data Update")
            else:
                print("SAME", page_title)
            sleep(0.2)  # To prevent rate limiting

    print("Done!")
cookies_file = '/data/project/deltaquad-bots/int-admin-cookies.txt'

cookie_jar = MozillaCookieJar(cookies_file)
if os.path.exists(cookies_file):
    # Load cookies from file, including session cookies (expirydate=0)
    cookie_jar.load(ignore_discard=True, ignore_expires=True)
print('We have %d cookies' % len(cookie_jar))

connection = requests.Session()
connection.cookies = cookie_jar  # Tell Requests session to use the cookiejar.

masterwiki = Site('en.wikipedia.org', pool=connection)
print("Login status: ")
print(masterwiki.logged_in)
if not masterwiki.logged_in:
    masterwiki.login(login.username, login.password)
    # Save cookies to file, including session cookies (expirydate=0)
    print(connection.cookies)
    cookie_jar.save(ignore_discard=True, ignore_expires=True)

pagelist = masterwiki.pages["User:AmandaNP/scriptcopy.js"].text()
pagelist = pagelist.split("\n")
for wikipage in pagelist:
    wikipage = wikipage.split(",")  # each line: "<target page>,<source url>"
    postpage = masterwiki.pages[wikipage[0]]
    source = wikipage[1]
    website = urllib.request.urlopen(source)
    webtext = website.read().decode('utf-8')  # decode bytes before comparing
    if postpage.text() == webtext:  # text() is a method, not an attribute
        continue
import datetime

from mwclient import Site

site = Site('lol.gamepedia.com', path="/")  # set wiki
site.login('RheingoldRiver@BotPasswordName', 'smldrgsrthmldyhj')

limit = -1
now = datetime.datetime.utcnow()
now_timestamp = now.isoformat()
then = now - datetime.timedelta(hours=4)  # change hours if needed
last_timestamp = then.isoformat()

revisions = site.api('query', format='json', list='recentchanges',
                     rcstart=now_timestamp, rcend=last_timestamp,
                     rcprop='title|ids', rclimit='max', rcdir='older')

pages = []
pages_used = {}
revs = {}
failed_pages = []

for revision in revisions['query']['recentchanges']:
    revs[revision['revid']] = True
    if revision['title'] in pages_used:
        pass
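As an alternative to the raw site.api('query', list='recentchanges', ...) call, mwclient wraps this listing and handles API continuation itself; a sketch over the same four-hour window:

# Sketch using mwclient's recentchanges wrapper (same window as above):
for rc in site.recentchanges(start=now_timestamp, end=last_timestamp,
                             prop='title|ids', dir='older'):
    revs[rc['revid']] = True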