def spot(market="USD-BTC"):
    """Return the latest Bittrex trade price for *market* (default USD-BTC)."""
    url = base_uri + f"getticker?market={market}"
    ticker = web.get(url)
    return float(ticker["result"]["Last"])
def fml_search(query, id):  # ID is index of search query
    """fml - Retrieve FML search results, via FMyLife.com's dev API.

    query - the search string; id - 1-based index into the result set
    (clamped to the number of results).
    Returns a dict with the FML text, its item id, agree/deserved counts,
    the index used and the result count, or None on a request failure.
    """
    # Normalize the query into a '+'-separated search term.
    try:
        query = re.sub(r'[^\w\s]', '+', query)
        query = query.replace('.', '+')
        while '++' in query:
            query = query.replace('++', '+').strip('+')
        r = web.get('http://api.fmylife.com/view/search?search=%s&language=%s&key=%s'
                    % (query, language, key)).read()
    except Exception:
        # Narrowed from a bare except; a network/API failure means no result.
        return
    # find god awful FML
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    count = len(fml)
    if count == 0:
        # BUG FIX: the original interpolated the undefined name `parts`
        # (NameError on every empty result); report the query instead.
        return code.say('The definition for "{purple}%s{c}" wasn\'t found.' % query)
    if id > count:
        id = count
    # Who agrees
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    # It's their fault!
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.striptags(fml[id - 1]).strip(),
        'fml-id': fmlid[id - 1].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[id - 1]).strip(),
        '-': web.striptags(deserved[id - 1]).strip(),
        'id': id,
        'max': count,
    }
def check(ip):
    """Look up *ip* on the Project Honey Pot service and summarize the record.

    Returns a formatted summary string, or None when there is no record,
    no suspicious visits, or the record is stale (stale records are only
    logged via output.warning).
    """
    ip = str(ip)
    raw = web.get(base % web.quote(ip)).read()
    raw = raw.replace('\n', '').replace('\r', '')
    found = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(raw)
    if not found:
        return
    summary = web.striptags(found[0])
    # Guard clauses for the "nothing to report" responses.
    if 'We don\'t have data on this IP currently.' in summary:
        return
    if 'none of its visits have resulted' in summary:
        return
    summary = summary.split('Below', 1)[0]
    if 'The Project Honey Pot system has ' in summary:
        summary = summary.split('The Project Honey Pot system has ')[1]
    summary = summary[0].upper() + summary[1:]
    if 'This IP has not seen any suspicious activity' in raw:
        # Old record: warn locally instead of returning a result.
        if 'the IP address' in summary:
            summary = summary.replace('the IP address', '%s' % ip)
        output.warning(
            str(summary) + 'This is an old record so it might be invalid.')
        return
    if 'the IP address' in summary:
        summary = summary.replace('the IP address', '{red}%s{c}' % ip)
    return '{b}%s{b}' % summary.strip()
def spot(symbol="BTCUSDT"):
    """Return the current Binance price for *symbol* (default BTCUSDT)."""
    ticker = web.get(base_uri + f"ticker/price?symbol={symbol}")
    return float(ticker["price"])
def check(ip):
    """Query Project Honey Pot for *ip* and return a formatted verdict.

    None is returned when no useful data exists; stale records are
    reported through output.warning() instead of being returned.
    """
    ip = str(ip)
    page = web.get(base % web.quote(ip)).read().replace('\n', '').replace('\r', '')
    paragraphs = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(page)
    if not paragraphs:
        return
    text = web.striptags(paragraphs[0])
    if 'We don\'t have data on this IP currently.' in text:
        return
    elif 'none of its visits have resulted' in text:
        return
    else:
        text = text.split('Below', 1)[0]
        if 'The Project Honey Pot system has ' in text:
            text = text.split('The Project Honey Pot system has ')[1]
        # Capitalize the first character of the summary sentence.
        text = text[0].upper() + text[1:]
        if 'This IP has not seen any suspicious activity' in page:
            if 'the IP address' in text:
                text = text.replace('the IP address', '%s' % ip)
            output.warning(str(text) + 'This is an old record so it might be invalid.')
            return
        if 'the IP address' in text:
            text = text.replace('the IP address', '{red}%s{c}' % ip)
        return '{b}%s{b}' % text.strip()
def spot(symbol="USDT_BTC"):
    """Return the last Poloniex trade price for *symbol* (default USDT_BTC)."""
    tickers = web.get(base_uri + "?command=returnTicker")
    return float(tickers[symbol]["last"])
def _directory(cls):
    """Fetch and parse the jumpin.chat room directory.

    Parses the first page, resolves each listed room to its details,
    then walks any remaining directory pages. Returns the accumulated
    list of room detail objects.
    """
    response = web.get(url='https://jumpin.chat/directory')
    # Clear rooms left over from a previous directory scan.
    cls._directory_rooms[:] = []
    if len(response.errors) > 0:
        log.error(response.errors)
    else:
        cls._soup = BeautifulSoup(response.content, 'html.parser')
        # Rooms from the first page of the directory.
        for name in cls._parse_page_rooms():
            details = cls._room_details(name)
            if details is not None:
                cls._directory_rooms.append(details)
        # Walk the remaining pages, if any.
        cls._iter_dir_pages()
    return cls._directory_rooms
def user_info(cls, account):
    """
    Get the user information related to the account name.

    :param account: The tinychat account name.
    :type account: str
    :return: A dictionary containing info about the user account.
    :rtype: dict | None
    """
    url = 'https://tinychat.com/api/v1.0/user/profile?username={0}&'.format(account)
    response = web.get(url, as_json=True)
    if len(response.errors) > 0:
        log.error(response.errors)
        return None
    # Only a 'success' result carries the profile fields.
    if response.json['result'] == 'success':
        profile = response.json
        return {
            'biography': profile['biography'],
            'gender': profile['gender'],
            'location': profile['location'],
            'role': profile['role'],
            'age': profile['age'],
        }
    return None
def _search(cls, search_term, results=0):
    """Search for *search_term* and resolve results to track details.

    results - 0 means "first playable track only" (returned bare);
    a positive number returns up to that many tracks in a list.
    Returns None on request errors or when no 'items' key is present.
    """
    url = cls._search_url.format(
        API_KEY, web.quote(search_term.encode('UTF-8', 'ignore')))
    response = web.get(url=url, as_json=True, referer=REFERER)
    if len(response.errors) > 0:
        log.error(response.errors)
        return None
    if 'items' not in response.json:
        return None
    tracks = []
    for entry in response.json['items']:
        details = cls._details(entry['id']['videoId'])
        if details is not None:
            tracks.append(details)
            # Stop early once the requested number of tracks is reached.
            if results == 0 and len(tracks) == 1:
                break
            if results > 0 and results == len(tracks):
                break
    if results == 0 and len(tracks) > 0:
        return tracks[0]
    return tracks
def user_lookup(code, id, showerror=True):
    """Look up a Steam account on steamdb.info and say a one-line summary.

    id - the steamcommunity account name. showerror - whether failures
    are reported to the channel. Returns None on any failure.
    """
    try:
        # BUG FIX: the query string previously read 'player=%s¤cy=us' —
        # '&curren' had been swallowed as the HTML entity for the currency
        # sign; it must be '&currency=us'.
        data = web.get(
            'http://steamdb.info/calculator/?player=%s&currency=us' % id,
            timeout=10).read()
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say('{b}Unabled to retrieve info, that account is {red}private{c}!')
            return
        realname = re.search(
            r'<title>.*?</title>', data).group().split('>')[1].split(' \xc2\xb7')[0]
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>.*?</td>', data).group()
        status = web.striptags(status).strip('Status')
        # Basic user information
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)
        details = re.sub(r'\<.*?\>', ' {b}- ', details)
        details = re.sub(r'\[.*?\]', '', details)
        details = details.replace(': ', ': {b}')
        url = 'http://steamcommunity.com/id/' + id
        return code.say('{b}%s{b} - {green}%s{c} - %s - %s' % (realname, status, details, url))
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
def spot(ticker="tBTCUSD"):
    """Bitfinex realtime price for *ticker* (default tBTCUSD).

    Reads element -4 of the returned ticker array (the last-price field
    per the Bitfinex v2 ticker layout — confirm against their docs).
    """
    data = web.get(base_uri + f"ticker/{ticker}")
    return float(data[-4])
def search(code, input):
    """Queries DuckDuckGo for the specified input."""
    try:
        data = web.get(uri, params={'q': input.group(2)})
        page = data.text.replace('\r', '').replace('\n', '').strip()
        target = r'(?im)<div class="results_links .*?(?!.*web\-result\-sponsored)">.*?<a .*? href="(.*?)">.*?</a>.*?' \
                 '<div class="snippet">(.*?)</div>.*?<div class="url">(.*?)</div>'
        # Cap at url_count matches (slicing past the end is harmless).
        found = list(re.findall(target, page))[:url_count]
        if len(found) < 2:
            return code.say('{b}No results found{b}')
        results = []
        for idx, (link, snippet, short_url) in enumerate(found):
            short = web.escape(web.striptags(short_url).capitalize().split('/')[0])
            title = web.escape(web.striptags(snippet))
            if len(title) > title_length:
                title = title[:title_length] + '{b}...{b}'
            full_url = web.escape(web.striptags(link))
            results.append('{b}%s{b} - {%s}%s{c} - %s' % (
                short, url_colors[idx], title, full_url))
        return code.say(' | '.join(results))
    except Exception as e:
        output.error('Error in search.py: %s' % str(e))
        return code.say('{b}Unable to search for %s{b}' % input.group(2))
def get_url_data(url):
    """Fetch *url* and return a cleaned-up page title, or False.

    False is returned for URLs too short to be worth processing,
    non-text content, missing or too-short titles, and fetch errors.
    """
    if len(url) < url_min_length:
        return False  # URL is really short. Don't need shortening.
    try:
        uri = web.get(url)
        if not uri.info().maintype == 'text':
            return False
        data = uri.read(1024)  # Only read soo much of a large site.
        title = re.compile('<title>(.*?)</title>',
                           re.IGNORECASE | re.DOTALL).search(data).group(1)
        title = web.htmlescape(title)
        title = title.replace('\n', '').replace('\r', '')
        # Collapse runs of spaces. (The mangled source showed an identity
        # replace(' ', ' ') here, which would loop forever; two-spaces-to-
        # one is the evident intent.)
        while '  ' in title:
            title = title.replace('  ', ' ')
        # Shorten LONG titles.
        if len(title) > 200:
            title = title[:200] + '[...]'
        if len(title) < title_min_length:  # Title output too short
            return False
        return title
    except Exception:
        # Narrowed from a bare except; any fetch/parse failure means
        # "no usable title".
        return False
def get_time(code, input):
    """Returns the current time for a timezone (default 'est').

    Fetches an ISO-8601 timestamp from `uri`, converts the hour to a
    12-hour clock, and says a formatted time string; on any fetch/parse
    failure the syntax hint is said instead.
    """
    default = 'est'
    fmt = 'Time in {timezone} is {hour}:{minute}:{second} ({month}/{day}/{year})'
    err = 'Incorrect timezone. Syntax: .time <timezone>'
    if not input.group(2):
        timezone = default
    elif len(input.group(2).split()) > 1:
        # Multi-word input is not a timezone shorthand; use the default.
        timezone = default
    else:
        timezone = input.group(2).lower().strip()
    # Here, try and get the timezone, using the 'uri'
    try:
        r = web.get(uri % timezone).read()
        # Example output from the server... "2014-01-07T09:44:58-05:00"
        date, time = r.split('T')
        # (the original had a no-op [::1] slice here; removed)
        year, month, day = date.split('-')
        hour, minute, second = time.split(':', 2)
        if int(hour) > 12:
            # Convert 24-hour to 12-hour clock.
            hour = str(int(hour) - 12)
        # Drop the trailing UTC offset from the seconds field.
        second = second.split('-', 1)[0].split('+', 1)[0]
        return code.say(fmt.format(
            month=month, day=day, year=year, hour=hour,
            minute=minute, second=second, timezone=timezone.upper()))
    except Exception:
        # Narrowed from a bare except.
        return code.say(err)
def spot(symbol="XBTUSD"):
    """Return the last itBit trade price for *symbol* (default XBTUSD)."""
    ticker = web.get(base_uri + f"markets/{symbol}/ticker")
    return float(ticker["lastPrice"])
def fml_search(query, id):  # ID is index of search query
    """fml - Retrieve FML search results, via FMyLife.com's dev API.

    query - the search string; id - 1-based result index (clamped to the
    number of hits). Returns a result dict or None on a request failure.
    """
    # Try to query FML: squash punctuation into '+' separators first.
    try:
        query = re.sub(r'[^\w\s]', '+', query)
        query = query.replace('.', '+')
        while '++' in query:
            query = query.replace('++', '+').strip('+')
        r = web.get(
            'http://api.fmylife.com/view/search?search=%s&language=%s&key=%s'
            % (query, language, key)).read()
    except Exception:
        # Narrowed from a bare except.
        return
    # find god awful FML
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    count = len(fml)
    if count == 0:
        # BUG FIX: `parts` was never defined here (NameError on every
        # miss); show the normalized query instead.
        return code.say('The definition for "{purple}%s{c}" wasn\'t found.' % query)
    if id > count:
        id = count
    # Who agrees
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    # It's their fault!
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.striptags(fml[id - 1]).strip(),
        'fml-id': fmlid[id - 1].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[id - 1]).strip(),
        '-': web.striptags(deserved[id - 1]).strip(),
        'id': id,
        'max': count,
    }
def spot(market="btc_usd"):
    """Return the most recent Bisq trade price for *market* (default btc_usd)."""
    ticker = web.get(base_uri + f"ticker?market={market}", timeout=8)
    return float(ticker[0]["last"])
def spot(ticker="btcusd"):
    """Return the last Bitstamp trade price for *ticker* (default btcusd)."""
    data = web.get(base_uri + f"ticker/{ticker}")
    return float(data["last"])
def spot(pair="XBTUSD", full_pair="XXBTZUSD"):
    """Kraken realtime prices.

    pair - the short pair code sent in the request (e.g. XBTUSD).
    full_pair - the key Kraken uses for that pair in its response body.
    """
    data = web.get(base_uri + f"Ticker?pair={pair}")
    last_trade = data["result"][full_pair]["c"]
    return float(last_trade[0])
def __init__(self, room_name, username, n_key=None, proxy=None):
    """
    Initialize the Params class.

    :param room_name: The room name.
    :type room_name: unicode
    :param username: The username.
    :type username: unicode
    :param n_key: n_key passed on from the login page.
    :type n_key: str
    :param proxy: Use a proxy for requests.
    :type proxy: str
    """
    self._room_name = room_name
    self._username = username
    self._provided_n_key = n_key
    self._proxy = proxy
    self._n_key = u''
    self._flash_vars = []
    self._t2 = u''
    response = web.get(url=self._base_url.format(self._room_name),
                       proxy=self._proxy)
    if response.error is not None:
        raise Exception('Something went wrong, page.error=%s' % response.error)
    # Page fetched OK: cache the source and derive the session values.
    self._html_source = response.content
    self._set_n_key()
    self._set_flash_vars()
    self._set_t2()
def spot(symbol="btcusd"):
    """Return the last Gemini trade price for *symbol* (default btcusd)."""
    ticker = web.get(base_uri + f"pubticker/{symbol}")
    return float(ticker["last"])
def spot(symbol="BTC-USD"):
    """Return the current BTSE spot price for *symbol* (default BTC-USD)."""
    ticker = web.get(base_uri + f"spot/v2/ticker/{symbol}")
    return float(ticker["price"])
def steam_app_auto(code, input):
    """Build a one-line channel summary for a Steam app from steamdb.info.

    input.group(1) - the app id. Returns the formatted summary string,
    or None when the page cannot be fetched or parsed.
    """
    try:
        data = web.get('http://steamdb.info/app/%s/' % web.quote(input.group(1)),
                       timeout=10).read()
        # Renamed from `output` so the module-level `output` logger used
        # elsewhere in this file is not shadowed.
        parts = []
        parts.append(
            re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
        # Metacritic Score
        score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
        if len(score) < 1:
            score = '{b}N/A{b}'
        else:
            score = score[0]
        parts.append('Rating: %s/100' % score)
        # Released yet?
        if '<td class="span3">releasestate</td><td>prerelease</td>' in data:
            parts.append('{blue}Prerelease{c}')
        # OS List
        if '<td class="span3">oslist</td>' in data:
            tmp = re.findall(
                r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
            tmp = re.findall(r'title="(.*?)"', tmp)
            parts.append('OS: ' + ', '.join(tmp))
        else:
            parts.append('OS: N/A')
        # With pricing, there are a few options...
        # 1. Free, 2. Cost, 3. Cost with discount
        # As well, 1. Not released (May cause issues with rendering the price
        # table) or 2. released
        if 'isfreeapp</td><td>Yes</td>' in data:
            # We know it's free!
            parts.append('{green}Free{c}')
        elif '<table class="table table-prices">' in data:
            tmp = re.findall(
                r'<table class="table table-prices">.*?<tbody><tr>(.*?)</tr></tbody>',
                data)[0]
            tmp = tmp.replace('<td>', '').split('</td>', 1)[0]
            # We know it's paid... now check if discounted..
            if 'price-discount' in tmp:
                # We know it's discounted
                initial = tmp.split(
                    'class="price-initial">', 1)[1].split('</span>', 1)[0]
                new = tmp.split('</span>', 1)[1].split('<', 1)[0]
                discount = tmp.split(
                    '"price-discount">', 1)[1].split('<', 1)[0]
                parts.append('{green}%s{c} (%s, was %s)' % (new, discount, initial))
            else:
                parts.append('{green}' + tmp)
        parts.append('http://store.steampowered.com/app/%s/' %
                     re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])
        # if else, it's unknown, so ignore it. Likely an issues with release
        # pricing.
        return str(' - {b}'.join(parts).replace(': ', ': {b}'))
    except Exception:
        # Narrowed from a bare except; any scrape failure yields None.
        return
def spot(endpoint="spot", currency="USD"):
    """Coinbase realtime prices.

    endpoint - one of [spot|buy|sell]; currency - e.g. [USD].
    """
    path = f"prices/{endpoint}?" + f"currency={currency}"
    data = web.get(base_uri + path)
    return float(data["data"]["amount"])
def spot(pairs="USDXAU"):
    """Freeforexapi USD/XAU spot, converted from per-troy-ounce to per-gram."""
    rates = web.get(base_uri + f"live?pairs={pairs}", timeout=4)
    usd_per_toz = float(1.0 / rates["rates"][pairs]["rate"])
    return usd_per_toz / rock.grams_per_toz
def stats():
    """Blockchain.com stats, with hash_rate and difficulty coerced to int."""
    data = web.get(base_uri + "stats")
    for key in ('hash_rate', 'difficulty'):
        data[key] = int(data[key])
    return data
def spot(symbol="USDXAU"):
    """JM Bullion USD/XAU spot, converted from per-troy-ounce to per-gram."""
    quotes = web.get(base_uri)
    usd_per_toz = 1.0 / float(quotes["quotes"][symbol])
    return usd_per_toz / rock.grams_per_toz
def spot(symbol1="BTC", symbol2="USD"):
    """Return the last Cex trade price for the symbol1/symbol2 pair."""
    ticker = web.get(base_uri + f"ticker/{symbol1}/{symbol2}")
    return float(ticker["last"])
def tag_search(search_str, by_id=True, max_tunes=40):
    """
    Search last.fm for tunes matching the search term and turn them in to
    a list of youtube Track objects.

    :param search_str: Search term to search for.
    :type search_str: str
    :param by_id: If True, only tunes that have a youtube id will be added (recommended)
    :type by_id: bool
    :param max_tunes: The max amount of tunes to return.
    :type max_tunes: int
    :return: A list of Track objects, or None on error / no results.
    :rtype: list | None
    """
    url = TAG_SEARCH_URL.format(max_tunes, web.quote(search_str))
    lastfm = web.get(url=url, as_json=True)
    log.debug('lastfm response %s' % lastfm.json)
    if len(lastfm.errors) > 0:
        log.error(lastfm.errors)
        return None
    results = lastfm.json['results']
    if 'track' not in results:
        return None
    # BUG FIX: the original used `len(...) is not 0` — an identity test
    # on an int (a SyntaxWarning on modern CPython); use != instead.
    if len(results['track']) != 0:
        yt_tracks = []
        for track in results['track']:
            term = '%s-%s' % (track['artist'], track['name'])
            # Prefer the embedded youtube id when last.fm provides one.
            youtube_id = None
            if 'playlink' in track and 'data-youtube-id' in track['playlink']:
                youtube_id = track['playlink']['data-youtube-id']
            if youtube_id is not None:
                yt = Youtube.id_details(youtube_id)
                log.debug(yt)
                if yt is not None:
                    yt_tracks.append(yt)
            elif not by_id:
                # No youtube id: fall back to a title search. (The original
                # duplicated this fallback in two branches; merged here.)
                yt = Youtube.search(term)
                log.debug('search by search string: %s result: %s' % (term, yt))
                if yt is not None:
                    yt_tracks.append(yt)
        return yt_tracks
    return None
def getdata(user):
    """Fetch the last.fm recent-tracks RSS feed for *user*.

    Returns the response object, or False when the request fails or the
    user does not exist.
    """
    try:
        data = web.get(
            'http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss' % (user))
    except Exception:
        # Narrowed from a bare except. (Also removed the Python-2
        # `print data.encoding` statement — a SyntaxError on Python 3
        # and stray debug output in any case.)
        return False
    if 'No user exists with this name.' in data.text:
        return False
    return data
def _set_token(cls):
    """Fetch and cache the API token needed before room details can be read."""
    response = web.get(url='https://jumpin.chat/api/user/', as_json=True)
    if len(response.errors) > 0:
        log.error(response.errors)
    elif 'token' in response.json:
        cls._token = response.json['token']
def spot(symbol="btcusdt"):
    """Huobi realtime price: midpoint of the best bid and ask for *symbol*."""
    tick = web.get(base_uri + f"market/detail/merged?symbol={symbol}")["tick"]
    best_ask = tick["ask"][0]
    best_bid = tick["bid"][0]
    return (best_ask + best_bid) / 2.0
def spot():
    """LBMA USD/XAU spot: mean of today's AM and PM gold fixes, per gram."""
    gold = web.get(base_uri + "today/both.json")["gold"]
    usd_per_toz = (float(gold["am"]["usd"]) + float(gold["pm"]["usd"])) / 2.0
    return usd_per_toz / rock.grams_per_toz
def getdata(user):
    """Fetch the last.fm recent-tracks RSS for *user* as raw text.

    Returns the response body, or False on fetch failure or unknown user.
    """
    try:
        data = web.get(
            'http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss' % (user)).read()
    except Exception:
        # Narrowed from a bare except so exit signals are not swallowed.
        return False
    if 'No user exists with this name.' in data:
        return False
    return data
def dinner(code, input):
    """fd -- WHAT DO YOU WANT FOR F*****G DINNER?"""
    err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
    try:
        data = web.get(uri).read()
        results = re_mark.findall(data)
        if not results:
            return code.say(err)
        url, food = results[0][0], web.htmlescape(results[0][1])
        code.say('WHY DON\'T YOU EAT SOME F*****G {b}%s{b}. HERE IS THE RECIPE: %s' % (
            food.upper(), url))
    except Exception:
        # Narrowed from a bare except; any failure gets the stock reply.
        return code.say(err)
def _details(cls, video_id, check=True):
    """Fetch the details for one youtube video and build a Track.

    :param video_id: The youtube video id to look up.
    :param check: When True, region restrictions ('blocked'/'allowed'
        lists) are evaluated and a restricted video yields None.
    :return: A Track, or None on request error, unknown/deleted video,
        or a failed region check.
    """
    log.debug('video details for: %s, check: %s' % (video_id, check))
    url = cls._video_details_url.format(API_KEY, video_id)
    response = web.get(url=url, as_json=True, referer=REFERER)
    if len(response.errors) > 0:
        log.error(response.errors)
        return None
    else:
        if 'items' in response.json:
            # track stays None unless a playable video is found.
            track = None
            if len(response.json['items']) != 0:
                # does deleted videos contain contentDetails?
                if 'contentDetails' in response.json['items'][0]:
                    content_details = response.json['items'][0][
                        'contentDetails']
                    if check:
                        # Region restriction: a blocked list that matches, or
                        # an allowed list that doesn't, rejects the video.
                        if 'regionRestriction' in content_details:
                            if 'blocked' in content_details[
                                    'regionRestriction']:
                                blocked = content_details[
                                    'regionRestriction']['blocked']
                                if cls._is_blocked(blocked):
                                    return track
                            if 'allowed' in content_details[
                                    'regionRestriction']:
                                allowed = content_details[
                                    'regionRestriction']['allowed']
                                if not cls._is_allowed(allowed):
                                    return track
                    video_time = string_util.convert_to_seconds(
                        content_details['duration'])
                    video_title = response.json['items'][0]['snippet'][
                        'title']
                    image_medium = response.json['items'][0]['snippet'][
                        'thumbnails']['medium']['url']
                    track = Track(video_id=video_id, video_time=video_time,
                                  video_title=video_title, image=image_medium)
            return track
def py(code, input):
    """python <commands> -- Execute Python inside of a sandbox"""
    query = input.group(2).encode('utf-8')
    uri = 'http://tumbolia.appspot.com/py/'
    try:
        answer = web.get(uri + web.quote(query)).read()
    except Exception:
        # Narrowed from a bare except; the try body is now only the fetch.
        return code.reply('{red}The server did not return an answer.')
    if answer:
        answer = answer.replace('\n', ' ').replace(
            '\t', ' ').replace('\r', '')
        return code.reply(answer)
    # BUG FIX: the original replied with the literal placeholder
    # 'Sorry, no {b}%s{b}' — the query was never interpolated.
    return code.reply('Sorry, no {b}%s{b}' % input.group(2))
def factoid(code, input):
    """ `<word> -- Shows what data is associated with <word>.
        ` <add|delete|info> [args] -- for management

    Factoid bodies may be tagged: <py> runs sandboxed python,
    <act> emits an IRC action, <url> fetches and says the URL's body;
    anything else is said verbatim.
    """
    if len(input.group().strip()) == 1:
        return
    # If it's a management command...
    if input.group().startswith('` '):
        if not input.admin:
            return code.reply('{red}You need to be an admin to use that command!')
        return factoid_manage(input.group().split(' ', 1)[1], code, input)
    db = database.get(code.nick, 'factoids')
    if not db:
        db = []
    # Split the factoid name from any trailing arguments.
    if len(input.group(1).strip().split()) <= 1:
        id, arguments = input.group(1), ''
    else:
        id, arguments = input.group(1).split(' ', 1)
    if id not in db:
        return code.say('{red}That command doesn\'t exist. (If Admin, add it with "{purple}` add <name> <data>{red}")')
    f = db[id]
    if f.startswith('<py>'):
        data = f[4:].strip()
        # Expose caller context to the sandboxed snippet as globals.
        variables = 'input="""{}"""; nick="{}"; sender="{}"; bot="{}";'.format(
            arguments.replace('"', '\\"'), input.nick, input.sender, code.nick
        )
        result = web.exec_py(variables + data)
        if 'Traceback (most recent call last)' in result:
            result = 'Python error: ' + web.haste(result)
        return code.say(result)
    elif f.startswith('<act>'):
        result = f[5:].strip()
        return code.action(result)
    elif f.startswith('<url>'):
        url = f[5:].strip()
        try:
            return code.say(web.get(url).read())
        except Exception:
            # Narrowed from a bare except.
            return code.say('Failed to fetch the URL.')
    else:
        return code.say(f)
def gettld(code, input):
    """tld <shorthand> -- Show information about the given Top Level Domain.

    Tries two scrape patterns against the TLD listing page; a third,
    country-flag pattern is used as the final fallback.
    """
    page = web.get(uri).read()
    search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
    search = search.format(input.group(2))
    matches = re.compile(search).findall(page)
    if not matches:
        # Second pattern: rows where the title/name are captured differently.
        search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
        search = search.format(input.group(2))
        matches = re.compile(search).findall(page)
    if matches:
        # Strip markup from every captured field.
        matches = [r_tag.sub("", m) for m in list(matches[0])]
        desc = matches[2]
        if len(desc) > 400:
            desc = desc[:400] + "..."
        reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (
            matches[1], desc, matches[3], matches[4]
        )
        code.say(reply)
    else:
        # BUG FIX: `unicode(...)` does not exist on Python 3 (NameError);
        # use str() instead.
        search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
        search = search.format(str(input.group(2)))
        matches = re.compile(search).findall(page)
        if matches:
            dict_val = dict()
            (dict_val["country"], dict_val["expl"], dict_val["notes"],
             dict_val["idn"], dict_val["dnssec"], dict_val["sld"]) = matches[0]
            for key in dict_val:
                if dict_val[key] == " ":
                    dict_val[key] = "N/A"
                dict_val[key] = r_tag.sub('', dict_val[key])
            if len(dict_val["notes"]) > 400:
                dict_val["notes"] = dict_val["notes"][:400] + "..."
            reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (
                dict_val["country"], dict_val["expl"], dict_val["notes"],
                dict_val["idn"], dict_val["dnssec"], dict_val["sld"]
            )
        else:
            reply = "No matches found for TLD: {0}".format(str(input.group(2)))
        code.say(reply)
def _room_details(cls, room_name):
    """Return a Room object for *room_name*, or None if empty or on error."""
    if cls._token is None:
        # A token must be fetched before room details can be requested.
        cls._set_token()
    response = web.get(url='https://jumpin.chat/api/rooms/%s' % room_name,
                       as_json=True)
    if len(response.errors) > 0:
        log.error(response.errors)
        return None
    room = Room(**response.json)
    if len(room.users) > 0:
        return room
    return None
def gen_db(botname):
    """Download UnicodeData.txt and cache the name/codepoint tables.

    Populates the module-level uc / uc_names / cp_names tables and
    persists them (with a timestamp) under the 'unicodedata' key.
    """
    global uc_names, cp_names, uc
    # http://www.unicode.org/reports/tr44/#UnicodeData.txt
    output.info('Downloading Unicode data')
    raw = get('http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt').read()
    lines = raw.split('\n')
    # Drop the trailing empty element produced by the final newline.
    del lines[-1]
    # http://www.unicode.org/reports/tr44/#UnicodeData.txt
    for line in lines:
        fields = line.split(';')
        name = fields[1]
        if fields[10]:
            # Append the alternate (Unicode 1.0) name when present.
            name = name + ' ' + str(fields[10])
        uc[name] = fields
        uc_names.append(name)
        cp_names[fields[0]] = name
    database.set(botname,
                 {'uc': uc, 'uc_names': uc_names, 'cp_names': cp_names,
                  'time': int(time.time())},
                 'unicodedata')
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        text = web.quote(input.group(2))
        data = web.get('http://thefuckingweather.com/?where=%s' % text).read()
        # Scrape the three display blocks: temperature, remark, flavor.
        temp = re.compile(
            r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>').findall(data)[0]
        temp = re.sub(r'\<.*?\>', '', temp).strip().replace(' ', '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say(web.htmlescape(temp) + ' ' + remark + '. ' + flavor)
    except Exception:
        # Narrowed from a bare except; any scrape failure gets the stock line.
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
def fml_random():
    """fml - Retrieve random FML's, via FMyLife.com's dev API.

    Returns a dict with the FML text, its item id and the agree/deserved
    counts, or None when the API request fails.
    """
    try:
        r = web.get('http://api.fmylife.com/view/random/1?language=%s&key=%s' % (
            language, key
        )).read()
    except Exception:
        # Narrowed from a bare except so exit signals are not swallowed.
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
def wa(code, input):
    """Wolfram Alpha search.

    Fetches the answer from the tumbolia proxy, cleans the pipe/arrow
    markup, and says the result; replies with an error line on timeout
    or when no result comes back.
    """
    query = input.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query), timeout=10).read()
    except Exception:
        # Narrowed from a bare except.
        return code.say('It seems WolframAlpha took too long to respond!')
    if answer and 'json stringified precioussss' not in answer:
        parts = answer.strip('\n').split(';')
        parts = [part.replace('|', '').strip() for part in parts]
        answer = '{purple}{b}WolframAlpha: {c}{b}' + \
            ' - '.join(parts).replace('\\', '').replace('->', ': ')
        # Collapse runs of spaces. (The mangled source showed an identity
        # replace(' ', ' ') loop; two-spaces-to-one is the evident intent.)
        while '  ' in answer:
            answer = answer.replace('  ', ' ')
        return code.say(web.htmlescape(answer))
    else:
        return code.reply('{red}Sorry, no result.')
def fml_id_search(query_id):
    """fml - Retrieve the FML in accordance with the assigned ID, via
    FMyLife.com's dev API.

    Returns a dict with the FML text, its item id and the agree/deserved
    counts, or None when the API request fails.
    """
    try:
        r = web.get('http://api.fmylife.com/view/%s/nocomment?language=%s&key=%s' % (
            str(query_id), language, key
        )).read()
    except Exception:
        # Narrowed from a bare except so exit signals are not swallowed.
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
def get_tweets(url, sender_uid=False):
    """Scrape tweets from the twitter page at *url*.

    sender_uid - when given, tweets authored by a different user are
    annotated as retweets of that uid. Returns a list of tweet dicts,
    False when no tweets parsed, or None when the page fetch fails.
    """
    try:
        data = web.get(url).read().replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except Exception:
        # Narrowed from a bare except; a fetch failure yields None.
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['url'] = list(r_tweeturl.findall(tweet)[0])
            tmp['url'] = 'https://twitter.com/%s/status/%s' % (tmp['url'][0], tmp['url'][1])
            tmp['full'] = web.htmlescape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0]).strip()
            tweet_data = r_tweet.findall(tweet)[0].strip()
            # Expand shortened links. BUG FIX: the loop variable is now
            # `link` so it no longer shadows the `url` parameter.
            for link in r_url.findall(tweet_data):
                link = list(link)
                tweet_data = tweet_data.replace(link[1], link[0])
            tmp['text'] = web.htmlescape(web.striptags(tweet_data).strip())
            # Highlight @mentions.
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()
            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + \
                        ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip(
                        '@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except Exception:
            # Skip tweets whose markup doesn't match the expected shape.
            continue
    if tweets:
        return tweets
    return False