def check(ip):
    # Look up an IP on Project Honey Pot and return a formatted summary
    # string, or None when there is no (useful) record.
    ip = str(ip)
    # `base` is a module-level URL template; flatten the page to one line.
    data = web.text(base % web.quote(ip)).replace('\n', '').replace('\r', '')
    items = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(data)
    if not items:
        return
    item = web.striptags(items[0])
    # No record / nothing suspicious -> report nothing.
    if 'We don\'t have data on this IP currently.' in item:
        return
    elif 'none of its visits have resulted' in item:
        return
    else:
        # Keep only the summary sentence before the "Below ..." details.
        item = item.split('Below', 1)[0]
    if 'The Project Honey Pot system has ' in item:
        # Drop the boilerplate prefix and re-capitalise the remainder.
        item = item.split('The Project Honey Pot system has ')[1]
        item = item[0].upper() + item[1:]
    if 'This IP has not seen any suspicious activity' in data:
        # Stale record: log a warning instead of reporting to the caller.
        if 'the IP address' in item:
            item = item.replace('the IP address', '%s' % ip)
        output.warning(
            str(item) + 'This is an old record so it might be invalid.')
        return
    if 'the IP address' in item:
        # Substitute the literal phrase with the (highlighted) IP itself.
        item = item.replace('the IP address', '{red}%s{c}' % ip)
    if 'Double check your URL to make sure this error' in item:
        return
    return '{b}%s{b}' % item.strip()
def get_time(code, input):
    """time <abbreviated timezone> -- Returns the current time. Yucky"""
    fmt = 'Time in {timezone} is {hour}:{minute}:{second} ({month}/{day}/{year})'
    # Default to EST when no timezone (or a multi-word argument) is given.
    if not input.group(2):
        timezone = 'EST'
    elif len(input.group(2).split()) > 1:
        timezone = 'EST'
    else:
        timezone = input.group(2)
    try:
        try:
            r = web.text(uri.format(timezone=timezone.strip().upper()),
                         timeout=15)
        except Exception:
            return code.say('Unable to calculate time for that timezone.')
        # Response looks like an ISO-8601 timestamp: YYYY-MM-DDThh:mm:ss±off
        date, time = r.split('T')
        # Fix: the original used date.split('-')[::1]; [::1] is a no-op copy.
        year, month, day = date.split('-')
        hour, minute, second = time.split(':', 2)
        if int(hour) > 12:
            # Convert 24-hour to 12-hour display.
            hour = str(int(hour) - 12)
        # Strip the trailing UTC-offset (+hh:mm / -hh:mm) from the seconds.
        second = second.split('-', 1)[0].split('+', 1)[0]
        return code.say(
            fmt.format(month=month, day=day, year=year, hour=hour,
                       minute=minute, second=second,
                       timezone=timezone.upper()))
    except Exception:
        # Any parse failure is treated as a bad timezone argument.
        return code.say('Incorrect timezone. Syntax: .time <timezone>')
def power(code, data):
    """power - shows power outages in Nova Scotia :D"""
    try:
        # Fetch the outage feed and relay it verbatim to the channel.
        return code.say(web.text("http://test.fm1337.com/"))
    except:
        return code.say("{red}Failed to fetch information!")
def check(ip):
    # Query Project Honey Pot for `ip`; return a formatted one-line
    # summary, or None when there is no record worth reporting.
    ip = str(ip)
    data = web.text(base % web.quote(ip)).replace('\n', '').replace('\r', '')
    items = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(data)
    if not items:
        return
    item = web.striptags(items[0])
    if 'We don\'t have data on this IP currently.' in item:
        return
    elif 'none of its visits have resulted' in item:
        return
    else:
        # Trim everything after the "Below ..." details section.
        item = item.split('Below', 1)[0]
    if 'The Project Honey Pot system has ' in item:
        # Remove the boilerplate lead-in and capitalise what remains.
        item = item.split('The Project Honey Pot system has ')[1]
        item = item[0].upper() + item[1:]
    if 'This IP has not seen any suspicious activity' in data:
        # Old/stale record: only log a warning, don't report it.
        if 'the IP address' in item:
            item = item.replace('the IP address', '%s' % ip)
        output.warning(str(item) + 'This is an old record so it might be invalid.')
        return
    if 'the IP address' in item:
        item = item.replace('the IP address', '{red}%s{c}' % ip)
    if 'Double check your URL to make sure this error' in item:
        return
    return '{b}%s{b}' % item.strip()
def get_time(code, input):
    """time <abbreviated timezone> -- Returns the current time. Yucky"""
    fmt = 'Time in {timezone} is {hour}:{minute}:{second} ({month}/{day}/{year})'
    # Default to EST when no timezone (or a multi-word argument) is given.
    if not input.group(2):
        timezone = 'EST'
    elif len(input.group(2).split()) > 1:
        timezone = 'EST'
    else:
        timezone = input.group(2)
    try:
        try:
            r = web.text(uri.format(timezone=timezone.strip().upper()),
                         timeout=15)
        except:
            return code.say('Unable to calculate time for that timezone.')
        # Response is an ISO-8601 timestamp: YYYY-MM-DDThh:mm:ss±offset.
        date, time = r.split('T')
        # NOTE(review): [::1] is a no-op full-copy slice; plain split() would do.
        year, month, day = date.split('-')[::1]
        hour, minute, second = time.split(':', 2)
        if int(hour) > 12:
            # Convert 24-hour to 12-hour display.
            hour = str(int(hour) - 12)
        # Strip the trailing UTC-offset (+hh:mm / -hh:mm) from the seconds.
        second = second.split('-', 1)[0].split('+', 1)[0]
        return code.say(fmt.format(
            month=month, day=day, year=year, hour=hour, minute=minute,
            second=second, timezone=timezone.upper()
        ))
    except:
        return code.say('Incorrect timezone. Syntax: .time <timezone>')
def user_lookup(code, id, showerror=True):
    """Look up a Steam account on steamdb.info and announce a summary.

    `id` may be a numeric profile ID or a vanity name. When `showerror`
    is True, failures (private profile, scrape error) are reported to
    the channel; otherwise the function fails silently.
    """
    try:
        # Fix: the original URL contained the mojibake "¤cy=us" -- the
        # "&curren" of "&currency" had been HTML-entity-decoded to "¤".
        data = web.text(
            'http://steamdb.info/calculator/?player={id}&currency=us'.format(
                id=id), timeout=10)
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say(
                    '{b}Unabled to retrieve info, that account is {red}private{c}!'
                )
            return
        realname = re.search(r'<title>(?P<name>.*?) \xb7 .*?</title>',
                             data).group('name')
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>(?P<status>.*?)</td>',
            data).group('status')
        # Basic user information lives in a BBCode-style [list] block.
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)      # drop closing tags
        details = re.sub(r'\<.*?\>', ' {b}- ', details)  # opening tags -> separators
        details = re.sub(r'\[.*?\]', '', details)        # strip BBCode markers
        details = details.replace(': ', ': {b}')         # bold the values
        # Numeric ids map to /profiles/, vanity names to /id/.
        form = 'profiles' if str(id).isdigit() else 'id'
        url = 'http://steamcommunity.com/{}/'.format(form) + id
        return code.say(
            '{b}%s{b} - {green}%s{c} - %s - %s' %
            (web.escape(realname), web.striptags(status), details, url))
    except:
        # Best-effort scrape: any failure is reported as "not found".
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
def get_tweets(url, sender_uid=False):
    """Scrape tweets from a Twitter page.

    Returns a list of dicts (keys: url/full/user/time/text), False when
    the page yielded no parseable tweets, or None when the fetch failed.
    When `sender_uid` differs from the tweet author, the entry is marked
    as a retweet.
    """
    try:
        data = web.text(url).replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except:
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['url'] = list(r_tweeturl.findall(tweet)[0])
            tmp['url'] = 'https://twitter.com/%s/status/%s' % (tmp['url'][0],
                                                               tmp['url'][1])
            tmp['full'] = web.escape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0])
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # Fix: use a raw string for the backreference; '\g<url>' in a
            # plain literal is an invalid escape sequence.
            tweet_data = re.sub(r_url, r'\g<url>', tweet_data)
            tmp['text'] = web.escape(web.striptags(tweet_data))
            # Highlight @mentions in the tweet body.
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip('@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except:
            # Skip tweets that don't match the expected markup.
            continue
    if tweets:
        return tweets
    else:
        return False
def kernel(code, input):
    """ kernel - Gets the latest kernel versions from kernel.org """
    banner = web.text('https://www.kernel.org/finger_banner')
    versions = []
    # Each banner line names a branch ("stable", "mainline", ...) and a version.
    for branch, version in re.findall(
            r'The latest (.*?) version of the Linux kernel is: (.*)', banner):
        versions.append("{b}%s{b} - %s" % (branch, version.strip()))
    code.say("Latest kernels: %s" % ", ".join(versions))
def speedtest(code, input):
    # Scrape a speedtest.net share page and report its result fields.
    id = input.group(2)
    # "/a/" links use the alternate share endpoint (uri_alt).
    if "/a/" in input.group(1):
        raw = web.clean(web.text(uri_alt % id))
    else:
        raw = web.clean(web.text(uri % id))
    # Isolate the share-main <div> holding the result values.
    raw = raw.split('<div class="share-main">', 1)[1]
    raw = raw.split('</div><!--/share-main-->', 1)[0]
    results = web.findin(r'<p>.*?</p>', raw)
    keys = ['Download', 'Upload', 'Ping', 'Device', 'ISP', 'Server']
    tmp = []
    # NOTE(review): the last <p> is skipped (len(results) - 1); presumably
    # it is trailing boilerplate rather than a result field -- confirm.
    for i in range(0, len(results) - 1):
        if len(web.striptags(results[i])) < 1:
            continue  # skip empty fields
        tmp.append('{b}%s{b}: %s' % (keys[i], web.striptags(results[i])))
    code.say(' {b}-{b} '.join(tmp))
def xkcd(code, input):
    """ xkcd -- Pull a random comic from http://xkcd.com/ """
    try:
        page = web.text("http://c.xkcd.com/random/comic/")
        img_src, title, alt = re.search(
            r'<img src="(.*?)" title="(.*?)" alt="(.*?)" />', page).groups()
        # Make sure to disable link shortening, as most IRC clients that auto-embed
        # images break if there is a redirect in place.
        code.msg(input.sender,
                 "{desc} - http:{img} - xkcd".format(desc=web.decode(title),
                                                     img=img_src),
                 shorten_urls=False)
    except:
        code.say("{red}Error fetching data from xkcd.com")
def cyanide(code, input):
    """ cyanide -- Pull a random comic from http://explosm.net/ """
    try:
        page = web.text("http://explosm.net/comics/random/")
        comic_url = re.search(r'<meta property="og:image" content="(.*?)">',
                              page).groups(1)[0]
        # Disable link shortening: auto-embedding IRC clients break on
        # redirected image URLs.
        code.msg(input.sender,
                 "{img} - Cyanide & Happiness".format(img=comic_url),
                 shorten_urls=False)
    except:
        code.say("{red}Error fetching data from explosm.net.")
def fml_fetch(random=False, by_id=False, search=False):
    """Fetch an FML via the dev API: random, by ID, or by search.

    `search` is a (query, index) pair. Returns formatted text, or False
    on any fetch/format failure. At least one selector must be truthy.
    """
    if not (random or by_id or search):
        raise ValueError
    args = {"language": language, "key": key}
    try:
        if random:
            return fml_fmt(web.text(uri % '/random/1', params=args))
        if by_id:
            endpoint = '/{0}/nocomment'.format(str(by_id))
            return fml_fmt(web.text(uri % endpoint, params=args))
        # search should be a tuple of (query, id) pair
        args['search'] = search[0]
        return fml_fmt(web.text(uri % '/search', params=args), id=search[1])
    except:
        return False
def isup(code, input):
    """isup <url> - Is it down for everyone, or just you?"""
    try:
        site = input.group(2)
        page = web.text("http://isup.me/%s" % site)
        if "not just you" in page:
            return code.msg(input.sender,
                            "{red}%s is down! It's not just you!" % site,
                            shorten_urls=False)
        elif "It's just you." in page:
            return code.msg(input.sender,
                            "{green}%s is up! Must just be you!" % site,
                            shorten_urls=False)
        # Page fetched but neither marker found.
        return code.say("{red}Failed to get the status of the website!")
    except:
        return code.say("{red}Failed to get the status of the website!")
def py(code, input):
    """python <commands> -- Execute Python inside of a sandbox"""
    query = input.group(2).encode('utf-8')
    try:
        answer = web.text(py_uri + web.quote(query))
        if answer:
            # Collapse the sandbox output onto a single line.
            answer = answer.replace('\n', ' ').replace(
                '\t', ' ').replace('\r', '')
            return code.reply(answer)
        else:
            # Fix: the original replied with the literal 'Sorry, no {b}%s{b}'
            # and never interpolated the query into the message.
            return code.reply('Sorry, no {b}%s{b}' % query)
    except:
        return code.reply('{red}The server did not return an answer.')
def dinner(code, input):
    """fd -- WHAT DO YOU WANT FOR F*****G DINNER?"""
    err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
    try:
        page = web.text(uri)
        found = re_mark.findall(page)
        if not found:
            return code.say(err)
        # First match is a (recipe URL, dish name) pair.
        url = found[0][0]
        food = web.escape(found[0][1])
        code.say('WHY DON\'T YOU EAT SOME F*****G {b}%s{b}. HERE IS THE RECIPE: %s'
                 % (food.upper(), url))
    except:
        return code.say(err)
def factoid(code, input):
    """
    `<word> -- Shows what data is associated with <word>.
    ` <add|delete|info> [args] -- for management
    """
    # A lone backtick carries no factoid name; ignore it.
    if len(input.group().strip()) == 1:
        return
    # If it's a management command...
    if input.group().startswith('` '):
        if not input.admin:
            return code.reply(
                '{red}You need to be an admin to use that command!')
        return factoid_manage(input.group().split(' ', 1)[1], code, input)
    db = database.get(code.nick, 'factoids')
    if not db:
        db = []
    # Split "name arguments..." -- a single word means no arguments.
    if len(input.group(1).strip().split()) <= 1:
        id, arguments = input.group(1), ''
    else:
        id, arguments = input.group(1).split(' ', 1)
    id = id.lower()
    if id not in db:
        # code.say('{red}That command doesn\'t exist. (If Admin, add it with "{purple}` add <name> <data>{red}")')
        return  # It doesn't know the command. Instead of spamming, just act silent.
    f = db[id]
    # Factoid payloads dispatch on a leading tag: <py>, <act>, <url>, or
    # plain text. Order matters: <py> is checked first.
    if f.startswith('<py>'):
        # SECURITY NOTE(review): this executes stored code with interpolated
        # user input via web.exec_py -- presumably sandboxed; confirm.
        data = f[4:].strip()
        variables = 'input="""{}"""; nick="{}"; sender="{}"; bot="{}";'.format(
            arguments.replace('"', '\\"'), input.nick, input.sender, code.nick)
        result = web.exec_py(variables + data)
        if 'Traceback (most recent call last)' in result:
            # Long tracebacks go to a paste service instead of the channel.
            result = 'Python error: ' + web.haste(result)
        return code.say(result)
    elif f.startswith('<act>'):
        # <act> payloads are emitted as a /me action.
        result = f[5:].strip()
        return code.action(result)
    elif f.startswith('<url>'):
        # <url> payloads are fetched and their body said verbatim.
        url = f[5:].strip()
        try:
            return code.say(web.text(url))
        except:
            return code.say('Failed to fetch the URL.')
    else:
        return code.say(f)
def steam_app_auto(code, input):
    # Scrape a steamdb.info app page and build a one-line summary:
    # name, rating, release state, OS list, price, and store link.
    data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)),
                    timeout=10)
    output = []
    output.append(
        re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
    # Metacritic Score
    score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
    if len(score) < 1:
        output.append('Rating: N/A')
    else:
        output.append('Rating: %s/100' % score[0])
    # Released yet?
    if re.search(r'(?im)<td .*?>releasestate</td><td>prerelease</td>', data):
        output.append('{blue}Prerelease{c}')
    # OS List
    if '<td class="span3">oslist</td>' in data:
        tmp = re.findall(
            r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
        # The OS names live in the icons' title="" attributes.
        tmp = re.findall(r'title="(.*?)"', tmp)
        output.append('OS: ' + ', '.join(tmp))
    else:
        output.append('OS: N/A')
    # With pricing, there are a few options...
    # 1. Free, 2. Cost, 3. Cost with discount
    # As well, 1. Not released (May cause issues with rendering the price
    # table) or 2. released
    if re.search(r'(?im)<td .*?>isfreeapp</td>.*?<td>Yes</td>', data):
        output.append('{green}Free{c}')
    else:
        tmp = re.findall(  # e.g. $19.99 at -20%
            r'<img .*? alt="us".*?> U.S. Dollar</td><td .*?>(?P<price>.*?)</td>' +
            '<td .*?>Base Price</td><td .*?>(?P<lowest>.*?)</td></tr>',
            data)[0][0]
        # Plain price -> green; discounted price -> green with red percent.
        tmp = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}',
                     tmp)
        tmp = re.sub(
            r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
            r'{green}\g<price>{c} ({red}\g<discount>{c})',
            web.striptags(tmp))
        output.append(tmp)
    output.append(
        'http://store.steampowered.com/app/%s/' %
        re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])
    # Join the parts and bold each "Key: Value" value.
    return str(' - {b}'.join(output).replace(': ', ': {b}'))
def gettld(code, input):
    """tld <shorthand> -- Show information about the given Top Level Domain."""
    # Scrapes the Wikipedia-style TLD table at module-level `uri`.
    # NOTE(review): uses `unicode`, so this is Python-2-only code.
    page = web.text(uri)
    # First attempt: row layout with an optional second link column.
    search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
    search = search.format(input.group(2))
    re_country = re.compile(search)
    matches = re_country.findall(page)
    if not matches:
        # Second attempt: layout with a mandatory link column.
        search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
        search = search.format(input.group(2))
        re_country = re.compile(search)
        matches = re_country.findall(page)
    if matches:
        matches = list(matches[0])
        # Strip residual HTML tags from every captured column.
        i = 0
        while i < len(matches):
            matches[i] = r_tag.sub("", matches[i])
            i += 1
        desc = matches[2]
        if len(desc) > 400:
            desc = desc[:400] + "..."  # keep IRC lines short
        reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (
            matches[1], desc, matches[3], matches[4]
        )
        code.say(reply)
    else:
        # Final attempt: country-code rows that start with a flag icon.
        search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
        search = search.format(unicode(input.group(2)))
        re_country = re.compile(search)
        matches = re_country.findall(page)
        if matches:
            matches = matches[0]
            dict_val = dict()
            # Column order: country, explanation, notes, IDN, DNSSEC, SLD.
            dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val[
                "idn"], dict_val["dnssec"], dict_val["sld"] = matches
            for key in dict_val:
                if dict_val[key] == " ":
                    dict_val[key] = "N/A"  # blank cell -> N/A
                dict_val[key] = r_tag.sub('', dict_val[key])
            if len(dict_val["notes"]) > 400:
                dict_val["notes"] = dict_val["notes"][:400] + "..."
            reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (
                dict_val["country"], dict_val["expl"], dict_val["notes"],
                dict_val["idn"], dict_val["dnssec"], dict_val["sld"]
            )
        else:
            reply = "No matches found for TLD: {0}".format(
                unicode(input.group(2)))
        code.say(reply)
def translate(text, input='auto', output='en'):
    # Translate `text` with Google's unofficial translate_a endpoint.
    # `input`/`output` are language codes ('auto' = detect source). An
    # output code ending in '-raw' returns the parsed response verbatim.
    # Returns (translated_text, detected_language).
    raw = False
    if output.endswith('-raw'):
        output = output[:-4]
        raw = True
    uri = 'https://translate.google.com/translate_a/t'
    params = {
        'sl': web.quote(input),   # source language
        'tl': web.quote(output),  # target language
        'js': 'n',
        'prev': '_t',
        'hl': 'en',
        'ie': 'UTF-8',
        'text': web.quote(text),
        'client': 't',
        'multires': '1',
        'sc': '1',
        'uptl': 'en',
        'tsel': '0',
        'ssel': '0',
        'otf': '1',
    }
    result = web.text(uri, params=params)
    # this is hackish
    # this makes the returned data parsable by the json module
    # (the endpoint elides values between commas, which is invalid JSON)
    result = result.replace(',,', ',').replace('[,', '["",')
    while ',,' in result:
        result = result.replace(',,', ',null,')
    data = json.loads(result)
    if raw:
        return str(data), 'en-raw'
    try:
        # Index 2 holds the detected source language in observed responses.
        language = data[2]
    except:
        language = '?'
    if isinstance(language, list):
        # NOTE(review): some responses nest the detected language; this digs
        # it out of a trailing element -- confirm against the live API.
        language = data[-2][0][0]
    # data[0] is a list of [translated_chunk, ...] entries; join the chunks.
    return ''.join(x[0] for x in data[0]), language
def steam_app_auto(code, input):
    # Build a one-line summary of a steamdb.info app page: name, rating,
    # release state, OS support, price, and the store URL.
    data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)),
                    timeout=10)
    output = []
    output.append(
        re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
    # Metacritic Score
    score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
    if len(score) < 1:
        output.append('Rating: N/A')
    else:
        output.append('Rating: %s/100' % score[0])
    # Released yet?
    if re.search(r'(?im)<td .*?>releasestate</td><td>prerelease</td>', data):
        output.append('{blue}Prerelease{c}')
    # OS List
    if '<td class="span3">oslist</td>' in data:
        tmp = re.findall(
            r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
        # OS names are carried in the icons' title="" attributes.
        tmp = re.findall(r'title="(.*?)"', tmp)
        output.append('OS: ' + ', '.join(tmp))
    else:
        output.append('OS: N/A')
    # With pricing, there are a few options...
    # 1. Free, 2. Cost, 3. Cost with discount
    # As well, 1. Not released (May cause issues with rendering the price
    # table) or 2. released
    if re.search(r'(?im)<td .*?>isfreeapp</td>.*?<td>Yes</td>', data):
        output.append('{green}Free{c}')
    else:
        tmp = re.findall(  # e.g. $19.99 at -20%
            r'<img .*? alt="us".*?> U.S. Dollar</td><td .*?>(?P<price>.*?)</td>' +
            '<td .*?>Base Price</td><td .*?>(?P<lowest>.*?)</td></tr>',
            data)[0][0]
        # Plain price -> green; discounted -> green price + red percent.
        tmp = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}',
                     tmp)
        tmp = re.sub(
            r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
            r'{green}\g<price>{c} ({red}\g<discount>{c})',
            web.striptags(tmp))
        output.append(tmp)
    output.append('http://store.steampowered.com/app/%s/' %
                  re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>',
                             data)[0])
    # Join all parts and bold every "Key: Value" value.
    return str(' - {b}'.join(output).replace(': ', ': {b}'))
def gen_db(botname):
    """Download UnicodeData.txt and cache the parsed tables via database.set."""
    global uc_names, cp_names, uc
    # http://www.unicode.org/reports/tr44/#UnicodeData.txt
    output.info('Downloading Unicode data')
    raw = web.text('http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt')
    lines = raw.split('\n')
    del lines[-1]  # drop the empty entry from the trailing newline
    # http://www.unicode.org/reports/tr44/#UnicodeData.txt
    for row in lines:
        fields = row.split(';')
        name = fields[1]
        if fields[10]:
            # Append field 10 (the alternate name) when it is non-empty.
            name = name + ' ' + str(fields[10])
        uc[name] = fields
        uc_names.append(name)
        cp_names[fields[0]] = name
    database.set(botname,
                 {'uc': uc, 'uc_names': uc_names, 'cp_names': cp_names,
                  'time': int(time.time())},
                 'unicodedata')
def gen_db(botname):
    """Rebuild the cached Unicode name tables from UnicodeData.txt."""
    global uc_names, cp_names, uc
    # Format reference: http://www.unicode.org/reports/tr44/#UnicodeData.txt
    output.info("Downloading Unicode data")
    text = web.text("http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt")
    # Skip the empty trailing element produced by the final newline.
    for entry in text.split("\n")[:-1]:
        cols = entry.split(";")
        label = cols[1]
        if cols[10]:
            # Append the non-empty alternate-name column.
            label = label + " " + str(cols[10])
        uc[label] = cols
        uc_names.append(label)
        cp_names[cols[0]] = label
    payload = {"uc": uc, "uc_names": uc_names, "cp_names": cp_names,
               "time": int(time.time())}
    database.set(botname, payload, "unicodedata")
def geoip(code, input):
    """ GeoIP user on join. """
    if not code.config('geoip_on_join'):
        return
    allowed = [channel.lower() for channel in code.config('geoip_on_join', [])]
    # Skip ignored hosts, ourselves, non-channel events and channels that
    # aren't on the allow-list. (Replaces the original
    # "True in [True if ... else False for ...]" with the any() idiom.)
    # NOTE(review): `allowed` is lowercased but input.channel is compared
    # as-is -- mixed-case channel names may never match; confirm intent.
    if any(item.lower() in input.host for item in ignore) or \
            input.nick == code.nick or not input.channel or \
            input.channel not in allowed:
        return
    try:
        # Only query for sane-looking hostnames/addresses.
        if not re.match(r'^[A-Za-z0-9\.\_\-\:]+$', input.host):
            return
        country = web.text("http://geoip.cf/api/%s/country" % input.host,
                           timeout=4)
        if country:
            code.say('{green}User is connecting from %s' % country)
    except:
        # Best-effort: lookup failures are silent.
        return
def wa(code, input):
    """Wolfram Alpha search - It's slow. """
    query = input.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.text(uri + web.quote(query), timeout=14)
    except:
        return code.say('It seems WolframAlpha took too long to respond!')
    if not answer or 'json stringified precioussss' in answer:
        return code.reply('{red}Sorry, no result.')
    # Each ';'-separated segment is one result field; drop pipes and pad.
    parts = [segment.replace('|', '').strip()
             for segment in answer.strip('\n').split(';')]
    answer = '{purple}{b}WolframAlpha: {c}{b}' + \
        ' - '.join(parts).replace('\\', '').replace('->', ': ')
    # Collapse runs of spaces left over from the cleanup.
    while '  ' in answer:
        answer = answer.replace('  ', ' ')
    return code.say(web.escape(answer))
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        page = web.text('http://thefuckingweather.com/',
                        params={"where": web.quote(input.group(2))})

        def first(pattern):
            # First match of `pattern` in the fetched page.
            return re.compile(pattern).findall(page)[0]

        temp = first(r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>')
        temp = web.striptags(temp).replace(' ', '').replace('"', '')
        remark = re.sub(r'\<.*?\>', '', first(r'<p class="remark">.*?</p>')).strip()
        flavor = re.sub(r'\<.*?\>', '', first(r'<p class="flavor">.*?</p>')).strip()
        return code.say('%s {b}%s{b}. %s' % (web.escape(temp), remark, flavor))
    except:
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
def fml_id_search(query_id):
    """fml - Retrieve the FML in accordance with the assigned ID, via FMyLife.com's dev API."""
    try:
        r = web.text(
            'http://api.fmylife.com/view/{}/nocomment'.format(str(query_id)),
            params={"language": language, "key": key})
    except:
        return
    # Pull the interesting XML fragments out of the response.
    texts = re.compile(r'<text>.*?</text>').findall(r)
    item_ids = re.compile(r'<item id=".*?">').findall(r)
    agrees = re.compile(r'<agree>.*?</agree>').findall(r)
    deserveds = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.escape(web.striptags(texts[0])),
        'fml-id': item_ids[0].replace('<item id="', '', 1).replace('">', '', 1),
        '+': web.striptags(agrees[0]),
        '-': web.striptags(deserveds[0]),
    }
def fml_random():
    """fml - Retrieve random FML's, via FMyLife.com's dev API."""
    try:
        response = web.text('http://api.fmylife.com/view/random/1',
                            params={"language": language, "key": key})
    except:
        return
    # Helper: all matches of `pattern` in the API response.
    def grab(pattern):
        return re.compile(pattern).findall(response)

    text = grab(r'<text>.*?</text>')[0]
    item = grab(r'<item id=".*?">')[0]
    return {
        'fml': web.escape(web.striptags(text)),
        'fml-id': item.replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(grab(r'<agree>.*?</agree>')[0]),
        '-': web.striptags(grab(r'<deserved>.*?</deserved>')[0]),
    }
def get_tweets(url, sender_uid=False):
    """Scrape tweets from a Twitter page.

    Returns a list of dicts (keys: url/full/user/time/text), False when
    nothing parseable was found, or None on a fetch error. When
    `sender_uid` differs from the author, the entry is flagged a retweet.
    """
    try:
        data = web.text(url).replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except:
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['url'] = list(r_tweeturl.findall(tweet)[0])
            tmp['url'] = 'https://twitter.com/%s/status/%s' % (tmp['url'][0],
                                                               tmp['url'][1])
            tmp['full'] = web.escape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0])
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # Fix: raw string for the backreference -- '\g<url>' in a plain
            # literal is an invalid escape sequence.
            tweet_data = re.sub(r_url, r'\g<url>', tweet_data)
            tmp['text'] = web.escape(web.striptags(tweet_data))
            # Highlight @mentions in the tweet body.
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip(
                        '@'):
                    tmp['text'] = tmp[
                        'text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip(
                        '@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except:
            # Skip tweets whose markup doesn't match the patterns.
            continue
    if tweets:
        return tweets
    else:
        return False
def user_lookup(code, id, showerror=True):
    """Look up a Steam account on steamdb.info and announce a summary.

    `id` is a numeric profile ID or vanity name; `showerror` controls
    whether failures are reported to the channel.
    """
    try:
        # Fix: the URL previously contained "¤cy=us" -- the "&curren" of
        # "&currency" had been HTML-entity-decoded to the "¤" character.
        data = web.text(
            'http://steamdb.info/calculator/?player={id}&currency=us'.format(id=id),
            timeout=10)
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say('{b}Unabled to retrieve info, that account is {red}private{c}!')
            return
        realname = re.search(r'<title>(?P<name>.*?) \xb7 .*?</title>',
                             data).group('name')
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>(?P<status>.*?)</td>',
            data).group('status')
        # Basic user information: stats live in a BBCode-style [list] block.
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)      # drop closing tags
        details = re.sub(r'\<.*?\>', ' {b}- ', details)  # opening tags -> separators
        details = re.sub(r'\[.*?\]', '', details)        # strip BBCode markers
        details = details.replace(': ', ': {b}')         # bold the values
        # Numeric ids use /profiles/, vanity names use /id/.
        form = 'profiles' if str(id).isdigit() else 'id'
        url = 'http://steamcommunity.com/{}/'.format(form) + id
        return code.say('{b}%s{b} - {green}%s{c} - %s - %s' % (
            web.escape(realname), web.striptags(status), details, url))
    except:
        # Best-effort scrape: any failure reads as "not found".
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
def fml_search(query, id):  # ID is index of search query
    """fml - Retrieve FML search results, via FMyLife.com's dev API.

    Returns a result dict (including 'id' and 'max' for paging) or None
    when the query fails or produces no results.
    """
    # Try to query FML
    try:
        # Normalise the query: punctuation becomes '+' separators and
        # repeated '+' runs are collapsed.
        query = re.sub(r'[^\w\s]', '+', query)
        query = query.replace('.', '+')
        while query.find('++') > -1:
            query = query.replace('++', '+').strip('+')
        data = {"search": query, "language": language, "key": key}
        r = web.text('http://api.fmylife.com/view/search', params=data)
    except:
        return
    # find god awful FML
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    count = len(fml)
    if count == 0:
        # Fix: this branch previously referenced undefined names
        # ('code', 'parts') and raised NameError. Return None so the
        # caller can report "no results" itself.
        return
    if id > count:
        id = count  # clamp the requested index to the result count
    # Who agrees
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    # It's their fault!
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.striptags(fml[id - 1]),
        'fml-id': fmlid[id - 1].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[id - 1]),
        '-': web.striptags(deserved[id - 1]),
        'id': id,
        'max': count
    }
def kernel(code, input):
    """ kernel - Gets the latest kernel versions from kernel.org """
    matches = re.findall(
        r'The latest (.*?) version of the Linux kernel is: (.*)',
        web.text('https://www.kernel.org/finger_banner'))
    # Format each (branch, version) pair for IRC output.
    formatted = ["{b}%s{b} - %s" % (name, ver.strip()) for name, ver in matches]
    code.say("Latest kernels: %s" % ", ".join(formatted))