def fml_search(query, id):  # ID is index of search query
    """fml - Retrieve FML search results, via FMyLife.com's dev API.

    Returns a dict of the id-th result (1-based, clamped to the result
    count), or None on network failure.
    """
    # Normalize the query into a '+'-separated search string.
    try:
        query = re.sub(r'[^\w\s]', '+', query)
        query = query.replace('.', '+')
        while query.find('++') > -1:
            query = query.replace('++', '+').strip('+')
        r = web.get('http://api.fmylife.com/view/search?search=%s&language=%s&key=%s'
                    % (query, language, key)).read()
    except Exception:
        # Narrowed from bare `except:`; network/API failure yields None.
        return
    # find god awful FML
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    count = len(fml)
    if count == 0:
        # BUG FIX: original referenced undefined `parts` (NameError);
        # report the cleaned query instead.
        return code.say('The definition for "{purple}%s{c}" wasn\'t found.'
                        % query.replace('+', ' '))
    if id > count:
        id = count
    # Who agrees
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    # It's their fault!
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.striptags(fml[id - 1]).strip(),
        'fml-id': fmlid[id - 1].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[id - 1]).strip(),
        '-': web.striptags(deserved[id - 1]).strip(),
        'id': id,
        'max': count
    }
def get_tweets(url, sender_uid=False):
    """Scrape tweets from `url`.

    Returns a list of dicts (keys: full, user, time, text), False when the
    page parsed but contained no usable tweets, or None on fetch failure.
    `sender_uid`, when given, marks tweets by other users as retweets.
    """
    try:
        data = urllib2.urlopen(url).read().replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except Exception:
        # Narrowed from bare `except:`; fetch/parse failure yields None.
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['full'] = web.htmlescape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0]).strip()
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # Expand shortened links: swap display text for the real URL.
            # (Renamed the loop variable — the original shadowed the `url`
            # parameter.)
            for link in r_url.findall(tweet_data):
                link = list(link)
                tweet_data = tweet_data.replace(link[1], link[0])
            tmp['text'] = web.htmlescape(web.striptags(tweet_data).strip())
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip('@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except Exception:
            # Skip tweets whose markup doesn't match the expected shape.
            continue
    if tweets:
        return tweets
    else:
        return False
def fml_search(query, id):  # ID is index of search query
    """fml - Retrieve FML search results, via FMyLife.com's dev API.

    Returns a dict of the id-th result (1-based, clamped to the result
    count), or None on network failure.
    """
    # Normalize the query into a '+'-separated search string.
    try:
        query = re.sub(r'[^\w\s]', '+', query)
        query = query.replace('.', '+')
        while query.find('++') > -1:
            query = query.replace('++', '+').strip('+')
        r = web.get(
            'http://api.fmylife.com/view/search?search=%s&language=%s&key=%s'
            % (query, language, key)).read()
    except Exception:
        # Narrowed from bare `except:`; network/API failure yields None.
        return
    # find god awful FML
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    count = len(fml)
    if count == 0:
        # BUG FIX: original referenced undefined `parts` (NameError);
        # report the cleaned query instead.
        return code.say('The definition for "{purple}%s{c}" wasn\'t found.'
                        % query.replace('+', ' '))
    if id > count:
        id = count
    # Who agrees
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    # It's their fault!
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.striptags(fml[id - 1]).strip(),
        'fml-id': fmlid[id - 1].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[id - 1]).strip(),
        '-': web.striptags(deserved[id - 1]).strip(),
        'id': id,
        'max': count
    }
def fml_fmt(data, id=0):
    """Parse FML API XML in `data` and return the entry at 1-based index `id`.

    `id` is clamped into [1, number of items]. Returns False when no items
    are present, or when the selected item failed to parse.
    """
    # Removed the pointless `[item for item in ...]` wrapper — findall
    # already returns a list.
    raw_items = re.compile(r'<item .*?>.*?</item>').findall(data)
    if not raw_items:
        return False
    items, count = [], 0
    for f in raw_items:
        count += 1
        try:
            items.append({
                'fml': web.escape(web.striptags(
                    re.compile(r'<text>(.*?)</text>').findall(f)[0])).replace(
                        'FML', '{red}FML{c}'),
                'id': int(re.compile(r'<item .*id="([0-9]+)".*>').findall(f)[0]),
                'uid': count,
                'max': len(raw_items),
                'agree': int(web.striptags(
                    re.compile(r'<agree>(.*?)</agree>').findall(f)[0])),
                'deserved': int(web.striptags(
                    re.compile(r'<deserved>(.*?)</deserved>').findall(f)[0]))
            })
        except Exception:
            # Narrowed from bare `except:`; keep positions aligned so `id`
            # still indexes correctly.
            items.append(False)
    # Clamp id into range, then convert to 0-based.
    if id < 1:
        id = 1
    if id > len(raw_items):
        id = len(raw_items)
    id = id - 1
    return items[id]
def get_tweets(url, sender_uid=False):
    """Scrape tweets from `url`.

    Returns a list of dicts (keys: url, full, user, time, text), False when
    the page parsed but contained no usable tweets, or None on fetch failure.
    `sender_uid`, when given, marks tweets by other users as retweets.
    """
    try:
        data = web.text(url).replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except Exception:
        # Narrowed from bare `except:`; fetch/parse failure yields None.
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['url'] = list(r_tweeturl.findall(tweet)[0])
            tmp['url'] = 'https://twitter.com/%s/status/%s' % (tmp['url'][0], tmp['url'][1])
            tmp['full'] = web.escape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0])
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # BUG FIX: raw string for the group reference — '\g' in a plain
            # string is an invalid escape sequence.
            tweet_data = re.sub(r_url, r'\g<url>', tweet_data)
            tmp['text'] = web.escape(web.striptags(tweet_data))
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip('@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except Exception:
            # Skip tweets whose markup doesn't match the expected shape.
            continue
    if tweets:
        return tweets
    else:
        return False
def search(code, input):
    """Queries DuckDuckGo for the specified input and says the top results."""
    try:
        data = web.get(uri, params={'q': input.group(2)})
        tmp = data.text.replace('\r', '').replace('\n', '').strip()
        target = r'(?im)<div class="results_links .*?(?!.*web\-result\-sponsored)">.*?<a .*? href="(.*?)">.*?</a>.*?' \
                 r'<div class="snippet">(.*?)</div>.*?<div class="url">(.*?)</div>'
        found = list(re.findall(target, tmp))
        if len(found) > url_count:
            found = found[:url_count]
        results = []
        # BUG FIX: the original tested `len(found) < 2`, so a single valid
        # result was reported as "no results".
        if len(found) < 1:
            return code.say('{b}No results found{b}')
        count = 0
        for item in found:
            i = list(item)
            result = {}
            result['url'] = web.escape(web.striptags(i[0]))
            result['short'] = web.escape(web.striptags(i[2]).capitalize().split('/')[0])
            result['title'] = web.escape(web.striptags(i[1]))
            if len(result['title']) > title_length:
                result['title'] = result['title'][:title_length] + '{b}...{b}'
            results.append('{b}%s{b} - {%s}%s{c} - %s' % (
                result['short'], url_colors[count], result['title'], result['url']))
            count += 1
        return code.say(' | '.join(results))
    except Exception as e:
        output.error('Error in search.py: %s' % str(e))
        return code.say('{b}Unable to search for %s{b}' % input.group(2))
def define(code, input):
    """Look up a dictionary definition for input.group(2) and say it, colorized."""
    try:
        data = web.json(uri % web.quote(input.group(2)))[0]
    except Exception:
        # Narrowed from bare `except:`; request/JSON failure or empty result.
        return code.reply('{red}Failed to get definition!')
    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word).strip()
    word = web.htmlescape(word)
    word = word.replace('\\n', '').replace('\n', '')
    # BUG FIX: collapse double spaces to single ones; as written the loop
    # replaced ' ' with ' ' (a no-op) and so never terminated when a space
    # was present.
    while '  ' in word:
        word = word.replace('  ', ' ')
    word = word.encode('ascii', 'ignore')
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))
    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (data['query'], word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    if len(word) > 250:
        word = word[:245] + '{c}{b}[...]'
    code.say(word)
def check(ip):
    """Query Project Honey Pot for `ip` and return a formatted verdict string.

    Returns None when there is nothing to report (no data, clean record,
    or an old record, which is logged as a warning instead).
    """
    ip = str(ip)
    page = web.get(base % web.quote(ip)).read().replace('\n', '').replace('\r', '')
    found = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(page)
    if not found:
        return
    item = web.striptags(found[0])
    # Nothing to report for unknown or clean addresses.
    if 'We don\'t have data on this IP currently.' in item:
        return
    if 'none of its visits have resulted' in item:
        return
    item = item.split('Below', 1)[0]
    prefix = 'The Project Honey Pot system has '
    if prefix in item:
        item = item.split(prefix)[1]
    item = item[0].upper() + item[1:]
    if 'This IP has not seen any suspicious activity' in page:
        # Stale record: log it rather than reporting to the channel.
        if 'the IP address' in item:
            item = item.replace('the IP address', '%s' % ip)
        output.warning(str(item) + 'This is an old record so it might be invalid.')
        return
    if 'the IP address' in item:
        item = item.replace('the IP address', '{red}%s{c}' % ip)
    return '{b}%s{b}' % item.strip()
def user_lookup(code, id, showerror=True):
    """Look up a Steam user via steamdb.info and say a one-line summary.

    `id` is a SteamID64 (digits) or a vanity name; `showerror` controls
    whether failures are reported to the channel.
    """
    try:
        # BUG FIX: the query string was mojibake ('¤cy=us') — '&curren' had
        # been folded into the HTML entity for '¤'. Restored '&currency=us'.
        data = web.text(
            'http://steamdb.info/calculator/?player={id}&currency=us'.format(
                id=id), timeout=10)
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say(
                    '{b}Unabled to retrieve info, that account is {red}private{c}!'
                )
            return
        realname = re.search(r'<title>(?P<name>.*?) \xb7 .*?</title>',
                             data).group('name')
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>(?P<status>.*?)</td>',
            data).group('status')
        # Basic user information
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)
        details = re.sub(r'\<.*?\>', ' {b}- ', details)
        details = re.sub(r'\[.*?\]', '', details)
        details = details.replace(': ', ': {b}')
        form = 'profiles' if str(id).isdigit() else 'id'
        url = 'http://steamcommunity.com/{}/'.format(form) + id
        return code.say(
            '{b}%s{b} - {green}%s{c} - %s - %s' %
            (web.escape(realname), web.striptags(status), details, url))
    except Exception:
        # Narrowed from bare `except:`.
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
def user_lookup(code, id, showerror=True):
    """Look up a Steam user via steamdb.info and say a one-line summary.

    `showerror` controls whether failures are reported to the channel.
    """
    try:
        # BUG FIX: the query string was mojibake ('¤cy=us') — '&curren' had
        # been folded into the HTML entity for '¤'. Restored '&currency=us'.
        data = web.get(
            'http://steamdb.info/calculator/?player=%s&currency=us' % id,
            timeout=10).read()
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say(
                    '{b}Unabled to retrieve info, that account is {red}private{c}!')
            return
        realname = re.search(
            r'<title>.*?</title>', data).group().split('>')[1].split(' \xc2\xb7')[0]
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>.*?</td>', data).group()
        status = web.striptags(status).strip('Status')
        # Basic user information
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)
        details = re.sub(r'\<.*?\>', ' {b}- ', details)
        details = re.sub(r'\[.*?\]', '', details)
        details = details.replace(': ', ': {b}')
        url = 'http://steamcommunity.com/id/' + id
        return code.say('{b}%s{b} - {green}%s{c} - %s - %s' %
                        (realname, status, details, url))
    except Exception:
        # Narrowed from bare `except:`.
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
def check(ip):
    """Query Project Honey Pot for `ip` and return a formatted verdict string.

    Returns None when there is nothing to report (no data, clean record,
    a lookup-error page, or an old record, which is logged as a warning).
    """
    ip = str(ip)
    page = web.text(base % web.quote(ip)).replace('\n', '').replace('\r', '')
    found = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(page)
    if not found:
        return
    item = web.striptags(found[0])
    # Nothing to report for unknown or clean addresses.
    if 'We don\'t have data on this IP currently.' in item:
        return
    if 'none of its visits have resulted' in item:
        return
    item = item.split('Below', 1)[0]
    prefix = 'The Project Honey Pot system has '
    if prefix in item:
        item = item.split(prefix)[1]
    item = item[0].upper() + item[1:]
    if 'This IP has not seen any suspicious activity' in page:
        # Stale record: log it rather than reporting to the channel.
        if 'the IP address' in item:
            item = item.replace('the IP address', '%s' % ip)
        output.warning(
            str(item) + 'This is an old record so it might be invalid.')
        return
    if 'the IP address' in item:
        item = item.replace('the IP address', '{red}%s{c}' % ip)
    if 'Double check your URL to make sure this error' in item:
        return
    return '{b}%s{b}' % item.strip()
def define(code, input):
    """Look up a dictionary definition for input.group(2) and say it, colorized."""
    try:
        data = web.json(uri % web.quote(input.group(2)))[0]
    except Exception:
        # Narrowed from bare `except:`; request/JSON failure or empty result.
        return code.reply('{red}Failed to get definition!')
    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word).strip()
    word = web.htmlescape(word)
    word = word.replace('\\n', '').replace('\n', '')
    # BUG FIX: collapse double spaces to single ones; as written the loop
    # replaced ' ' with ' ' (a no-op) and so never terminated when a space
    # was present.
    while '  ' in word:
        word = word.replace('  ', ' ')
    word = word.encode('ascii', 'ignore')
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))
    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (
        data['query'], word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    if len(word) > 250:
        word = word[:245] + '{c}{b}[...]'
    code.say(word)
def lastfm(code, input):
    """Reply with the last song played by the given last.fm username."""
    user = input.group(2).split()[0].strip().lower()
    data = getdata(user)
    # BUG FIX: test for a missing user *before* decoding; the original
    # called .decode() on the result first, so a None/empty response raised
    # instead of printing the error message.
    if not data:
        return code.say('Username %s does not exist in the last.fm database.' % (user))
    # Charset fuckery
    data = data.decode('utf-8').encode('ascii', 'ignore')
    song = web.striptags(re.compile(r'<title>.*?</title>').findall(data)[1])
    code.reply('{purple}' + web.htmlescape(song).replace(' ', ' -- ', 1) +
               '{c} {red}(via Last.Fm)')
def calc(code, input):
    """Evaluate input.group(2) via the calculator API and say the answer.

    '^' is rewritten to '**' before querying.
    """
    try:
        data = web.json(calc_uri, params={"q": input.group(2).replace('^', '**'),
                                          "format": "json"})
        if data['AnswerType'] != 'calc':
            return code.reply('Failed to calculate')
        answer = web.striptags(data['Answer'])
        return code.say(answer)
    except Exception:
        # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        return code.reply('Failed to calculate!')
def speedtest(code, input):
    """Fetch a speedtest.net share page and say its key/value summary."""
    id = input.group(2)
    # '/a/' links use the alternate share URL.
    source = uri_alt if "/a/" in input.group(1) else uri
    raw = web.clean(web.text(source % id))
    raw = raw.split('<div class="share-main">', 1)[1]
    raw = raw.split('</div><!--/share-main-->', 1)[0]
    results = web.findin(r'<p>.*?</p>', raw)
    keys = ['Download', 'Upload', 'Ping', 'Device', 'ISP', 'Server']
    parts = []
    # The final <p> is not a data field, so it is excluded.
    for idx in range(0, len(results) - 1):
        if len(web.striptags(results[idx])) < 1:
            continue
        parts.append('{b}%s{b}: %s' % (keys[idx], web.striptags(results[idx])))
    code.say(' {b}-{b} '.join(parts))
def fml_random():
    """fml - Retrieve random FML's, via FMyLife.com's dev API.

    Returns a dict (fml, fml-id, +, -) or None on failure.
    """
    try:
        r = web.get('http://api.fmylife.com/view/random/1?language=%s&key=%s' % (
            language, key
        )).read()
    except Exception:
        # Narrowed from bare `except:`; network failure yields None.
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    # Guard against an unexpected/empty payload instead of raising IndexError.
    if not (fml and fmlid and agree and deserved):
        return
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
def calc(code, input):
    """Evaluate input.group(2) via the calculator API and say the answer.

    '^' is rewritten to '**' before querying.
    """
    try:
        data = web.json(uri % web.quote(input.group(2).replace('^', '**')))
        if data['AnswerType'] != 'calc':
            return code.reply('Failed to calculate')
        answer = web.striptags(data['Answer'])
        return code.say(answer)
    except Exception:
        # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        return code.reply('Failed to calculate!')
def lastfm(code, input):
    """ lfm <username> -- Pull last played song for the user """
    user = input.group(2).split()[0].strip().lower()
    data = getdata(user)
    data = data.text.encode('ascii', 'ignore')
    if not data:
        return code.say('Username {} does not exist in the last.fm database.'.format(user))
    # BUG FIX: guard the title lookup — for an unknown user the page lacks a
    # second <title>, and the original raised IndexError instead of replying.
    titles = re.compile(r'<title>.*?</title>').findall(data)
    if len(titles) < 2:
        return code.say('Username {} does not exist in the last.fm database.'.format(user))
    song = web.striptags(titles[1])
    code.reply('{purple}' + web.escape(song).replace(' ', ' -- ', 1) +
               '{c} {red}(via Last.Fm)')
def fml_id_search(query_id):
    """fml - Retrieve the FML in accordance with the assigned ID, via FMyLife.com's dev API.

    Returns a dict (fml, fml-id, +, -) or None on failure/unknown ID.
    """
    try:
        r = web.get('http://api.fmylife.com/view/%s/nocomment?language=%s&key=%s' % (
            str(query_id), language, key
        )).read()
    except Exception:
        # Narrowed from bare `except:`; network failure yields None.
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    # Guard against an unknown ID / empty payload instead of raising IndexError.
    if not (fml and fmlid and agree and deserved):
        return
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
def fml_id_search(query_id):
    """fml - Retrieve the FML in accordance with the assigned ID, via FMyLife.com's dev API.

    Returns a dict (fml, fml-id, +, -) or None on failure/unknown ID.
    """
    try:
        args = {"language": language, "key": key}
        r = web.text('http://api.fmylife.com/view/{}/nocomment'.format(str(query_id)),
                     params=args)
    except Exception:
        # Narrowed from bare `except:`; network failure yields None.
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    # Guard against an unknown ID / empty payload instead of raising IndexError.
    if not (fml and fmlid and agree and deserved):
        return
    return {
        'fml': web.escape(web.striptags(fml[0])),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1),
        '+': web.striptags(agree[0]),
        '-': web.striptags(deserved[0])
    }
def fml_random():
    """fml - Retrieve random FML's, via FMyLife.com's dev API.

    Returns a dict (fml, fml-id, +, -) or None on failure.
    """
    try:
        args = {"language": language, "key": key}
        r = web.text('http://api.fmylife.com/view/random/1', params=args)
    except Exception:
        # Narrowed from bare `except:`; network failure yields None.
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    # Guard against an unexpected/empty payload instead of raising IndexError.
    if not (fml and fmlid and agree and deserved):
        return
    return {
        'fml': web.escape(web.striptags(fml[0])),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]),
        '-': web.striptags(deserved[0])
    }
def get_tweets(url, sender_uid=False):
    """Scrape tweets from `url`.

    Returns a list of dicts (keys: url, full, user, time, text), False when
    the page parsed but contained no usable tweets, or None on fetch failure.
    `sender_uid`, when given, marks tweets by other users as retweets.
    """
    try:
        data = web.text(url).replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except Exception:
        # Narrowed from bare `except:`; fetch/parse failure yields None.
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['url'] = list(r_tweeturl.findall(tweet)[0])
            tmp['url'] = 'https://twitter.com/%s/status/%s' % (tmp['url'][0],
                                                               tmp['url'][1])
            tmp['full'] = web.escape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0])
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # BUG FIX: raw string for the group reference — '\g' in a plain
            # string is an invalid escape sequence.
            tweet_data = re.sub(r_url, r'\g<url>', tweet_data)
            tmp['text'] = web.escape(web.striptags(tweet_data))
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip('@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except Exception:
            # Skip tweets whose markup doesn't match the expected shape.
            continue
    if tweets:
        return tweets
    else:
        return False
def calc(code, input):
    """Evaluate input.group(2) via the calculator API and say the answer.

    '^' is rewritten to '**' before querying.
    """
    try:
        data = web.json(calc_uri, params={
            "q": input.group(2).replace('^', '**'),
            "format": "json"
        })
        if data['AnswerType'] != 'calc':
            return code.reply('Failed to calculate')
        answer = web.striptags(data['Answer'])
        return code.say(answer)
    except Exception:
        # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        return code.reply('Failed to calculate!')
def get_tweets(url, sender_uid=False):
    """Scrape tweets from `url`.

    Returns a list of dicts (keys: full, user, time, text), False when the
    page parsed but contained no usable tweets, or None on fetch failure.
    `sender_uid`, when given, marks tweets by other users as retweets.
    """
    try:
        data = urllib2.urlopen(url).read().replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except Exception:
        # Narrowed from bare `except:`; fetch/parse failure yields None.
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['full'] = web.htmlescape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0]).strip()
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # Expand shortened links: swap display text for the real URL.
            # (Renamed the loop variable — the original shadowed the `url`
            # parameter.)
            for link in r_url.findall(tweet_data):
                link = list(link)
                tweet_data = tweet_data.replace(link[1], link[0])
            tmp['text'] = web.htmlescape(web.striptags(tweet_data).strip())
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip('@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except Exception:
            # Skip tweets whose markup doesn't match the expected shape.
            continue
    if tweets:
        return tweets
    else:
        return False
def steam_app_auto(code, input):
    """Look up a Steam app on steamdb.info and return a formatted summary line.

    input.group(1) is the app id. Renamed the local accumulator from `output`
    to `fields` — it shadowed the module-level `output` logger used elsewhere
    in this file.
    """
    data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)),
                    timeout=10)
    fields = []
    fields.append(
        re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
    # Metacritic Score
    score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
    if len(score) < 1:
        fields.append('Rating: N/A')
    else:
        fields.append('Rating: %s/100' % score[0])
    # Released yet?
    if re.search(r'(?im)<td .*?>releasestate</td><td>prerelease</td>', data):
        fields.append('{blue}Prerelease{c}')
    # OS List
    if '<td class="span3">oslist</td>' in data:
        tmp = re.findall(
            r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
        tmp = re.findall(r'title="(.*?)"', tmp)
        fields.append('OS: ' + ', '.join(tmp))
    else:
        fields.append('OS: N/A')
    # With pricing, there are a few options...
    # 1. Free, 2. Cost, 3. Cost with discount
    # As well, 1. Not released (May cause issues with rendering the price
    # table) or 2. released
    if re.search(r'(?im)<td .*?>isfreeapp</td>.*?<td>Yes</td>', data):
        fields.append('{green}Free{c}')
    else:
        tmp = re.findall(
            # e.g. $19.99 at -20%
            r'<img .*? alt="us".*?> U.S. Dollar</td><td .*?>(?P<price>.*?)</td>' +
            '<td .*?>Base Price</td><td .*?>(?P<lowest>.*?)</td></tr>',
            data)[0][0]
        tmp = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}', tmp)
        tmp = re.sub(
            r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
            r'{green}\g<price>{c} ({red}\g<discount>{c})', web.striptags(tmp))
        fields.append(tmp)
    fields.append(
        'http://store.steampowered.com/app/%s/' %
        re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])
    return str(' - {b}'.join(fields).replace(': ', ': {b}'))
def steam_app_auto(code, input):
    """Look up a Steam app on steamdb.info and return a formatted summary line.

    input.group(1) is the app id. Renamed the local accumulator from `output`
    to `fields` — it shadowed the module-level `output` logger used elsewhere
    in this file.
    """
    data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)),
                    timeout=10)
    fields = []
    fields.append(
        re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
    # Metacritic Score
    score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
    if len(score) < 1:
        fields.append('Rating: N/A')
    else:
        fields.append('Rating: %s/100' % score[0])
    # Released yet?
    if re.search(r'(?im)<td .*?>releasestate</td><td>prerelease</td>', data):
        fields.append('{blue}Prerelease{c}')
    # OS List
    if '<td class="span3">oslist</td>' in data:
        tmp = re.findall(
            r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
        tmp = re.findall(r'title="(.*?)"', tmp)
        fields.append('OS: ' + ', '.join(tmp))
    else:
        fields.append('OS: N/A')
    # With pricing, there are a few options...
    # 1. Free, 2. Cost, 3. Cost with discount
    # As well, 1. Not released (May cause issues with rendering the price
    # table) or 2. released
    if re.search(r'(?im)<td .*?>isfreeapp</td>.*?<td>Yes</td>', data):
        fields.append('{green}Free{c}')
    else:
        tmp = re.findall(
            # e.g. $19.99 at -20%
            r'<img .*? alt="us".*?> U.S. Dollar</td><td .*?>(?P<price>.*?)</td>' +
            '<td .*?>Base Price</td><td .*?>(?P<lowest>.*?)</td></tr>',
            data)[0][0]
        tmp = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}', tmp)
        tmp = re.sub(
            r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
            r'{green}\g<price>{c} ({red}\g<discount>{c})', web.striptags(tmp))
        fields.append(tmp)
    fields.append('http://store.steampowered.com/app/%s/' %
                  re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])
    return str(' - {b}'.join(fields).replace(': ', ': {b}'))
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        args = {"where": web.quote(input.group(2))}
        data = web.text('http://thefuckingweather.com/', params=args)
        # Temperature, remark, and flavor text are scraped from fixed markup.
        temp = re.compile(
            r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>').findall(data)[0]
        temp = web.striptags(temp).replace(' ', '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say('%s {b}%s{b}. %s' % (web.escape(temp), remark, flavor))
    except Exception:
        # Narrowed from bare `except:`; any scrape failure gets the stock error.
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
def search(code, input):
    """Queries Google for the specified input and says the top three results."""
    r = google_search(input.group(2))
    if not r:
        return code.reply("Problem getting data from Google.")
    if not r['responseData']['results']:
        return code.reply("No results found for '{purple}%s{c}'." % input.group(2))
    urls = r['responseData']['results']
    if len(urls) > 3:
        urls = urls[0:3]
    count, time = r['responseData']['cursor']['resultCount'], r[
        'responseData']['cursor']['searchResultTime'] + 's'
    # Make the search count prettified: keep the leading group and tag the
    # magnitude. (Replaced the original regex-finditer comma scan with the
    # equivalent str.count.)
    commas = count.count(',')
    if commas == 1:
        count = count.split(',', 1)[0] + 'k'
    elif commas == 2:
        count = count.split(',', 1)[0] + 'm'
    elif commas == 3:
        count = count.split(',', 1)[0] + 'b'
    # Renamed the local list from `output` — it shadowed the module-level
    # `output` logger used elsewhere in this file.
    entries = []
    r_type = code.format('{b}{title}{b}{c} - {link}')
    colors, color_count = ['{blue}', '{teal}', '{green}'], 0
    for url in urls:
        # Change colors based on priority
        color = colors[color_count]
        color_count += 1
        # Remove html formatting
        title = web.striptags(web.htmlescape(url['title']))
        # Restrict sizing of titles to no longer than 50 chars
        if len(title) > 50:
            title = title[0:44] + '[...]'
        # Shorten URL to fit more responses cleaner
        link = url['url']
        entries.append(color + r_type.format(title=title, link=link))
    code.say('%s ({b}%s{b}, {b}%s{b} results)' % (' | '.join(entries), time, count))
def user_lookup(code, id, showerror=True):
    """Look up a Steam user via steamdb.info and say a one-line summary.

    `id` is a SteamID64 (digits) or a vanity name; `showerror` controls
    whether failures are reported to the channel.
    """
    try:
        # BUG FIX: the query string was mojibake ('¤cy=us') — '&curren' had
        # been folded into the HTML entity for '¤'. Restored '&currency=us'.
        data = web.text('http://steamdb.info/calculator/?player={id}&currency=us'.format(id=id),
                        timeout=10)
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say('{b}Unabled to retrieve info, that account is {red}private{c}!')
            return
        realname = re.search(r'<title>(?P<name>.*?) \xb7 .*?</title>',
                             data).group('name')
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>(?P<status>.*?)</td>',
            data).group('status')
        # Basic user information
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)
        details = re.sub(r'\<.*?\>', ' {b}- ', details)
        details = re.sub(r'\[.*?\]', '', details)
        details = details.replace(': ', ': {b}')
        form = 'profiles' if str(id).isdigit() else 'id'
        url = 'http://steamcommunity.com/{}/'.format(form) + id
        return code.say('{b}%s{b} - {green}%s{c} - %s - %s' %
                        (web.escape(realname), web.striptags(status), details, url))
    except Exception:
        # Narrowed from bare `except:`.
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
def define(code, input):
    """Look up a dictionary definition, highlight word-type names, and say it."""
    try:
        data = web.json(uri.format(word=web.quote(input.group(2))))[0]
    except Exception:
        # Narrowed from bare `except:`; request/JSON failure or empty result.
        return code.reply('{red}Failed to get definition!')
    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word)
    word = web.escape(word)
    word = word.replace('\\n', '').replace('\n', '')
    # BUG FIX: collapse double spaces to single ones; as written the loop
    # replaced ' ' with ' ' (a no-op) and so never terminated when a space
    # was present.
    while '  ' in word:
        word = word.replace('  ', ' ')
    word = word.encode('ascii', 'ignore')
    if len(word) > 380:
        word = word[:375] + '{c}{b}[...]'
    # loop through and replace all possible type names
    for name in highlight:
        name = ' {} '.format(name)
        # BUG FIX: strip the padding before comparing — the original compared
        # the stripped query against the space-padded name, which could never
        # match, so the queried word itself got highlighted.
        if data['query'].lower().strip() == name.lower().strip():
            continue
        tmp = re.findall(name, word, flags=re.IGNORECASE)
        for item in tmp:
            word = word.replace(item, " [{blue}{b}%s{b}{c}] " % item.strip())
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))
    name = data['query'][0].upper() + data['query'][1::]
    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (name, word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    code.say(word)
def search(code, input):
    """Queries DuckDuckGo for the specified input and says the top results."""
    try:
        response = web.get(uri, params={'q': input.group(2)})
        page = response.text.replace('\r', '').replace('\n', '').strip()
        target = r'(?im)(<div class="result results_links.*?">.*?<a .*?class="result__a" href="([^"]+)">(.*?)</a>.*?</div>)'
        # Drop empty matches and sponsored ("badge--ad") entries.
        hits = [m for m in re.findall(target, page)
                if len(m) > 0 and "badge--ad" not in m[0]]
        if len(hits) > url_count:
            hits = hits[:url_count]
        if len(hits) < 1:
            return code.say('{b}No results found{b}')
        lines = []
        for pos, hit in enumerate(hits):
            entry = list(hit)
            title = web.escape(web.striptags(entry[2]))
            if len(title) > title_length:
                title = title[:title_length] + '{b}...{b}'
            lines.append('{%s}%s{c} - %s' % (url_colors[pos], title, entry[1]))
        return code.say(' | '.join(lines))
    except Exception as e:
        output.error('Error in search.py: %s' % str(e))
        return code.say('{b}Unable to search for %s{b}' % input.group(2))