def rottentomatoes(inp, bot=None):
    """rt <title> -- gets ratings for <title> from Rotten Tomatoes"""
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return "error: no api key set"

    title = inp.strip()

    results = http.get_json(movie_search_url, q=title, apikey=api_key)
    if results['total'] == 0:
        return 'No results.'

    movie = results['movies'][0]
    title = movie['title']
    movie_id = movie['id']
    critics_score = movie['ratings']['critics_score']
    audience_score = movie['ratings']['audience_score']
    url = movie['links']['alternate']

    if critics_score == -1:
        return

    reviews = http.get_json(movie_reviews_url % movie_id, apikey=api_key, review_type='all')
    review_count = reviews['total']

    fresh = critics_score * review_count // 100  # whole number of "fresh" reviews
    rotten = review_count - fresh

    return "{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
           "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh,
                                                      rotten, audience_score, url)
def youtime(inp):
    """youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
    request = http.get_json(search_api_url, q=inp)

    if 'error' in request:
        return 'error performing search'

    if request['data']['totalItems'] == 0:
        return 'no results found'

    video_id = request['data']['items'][0]['id']
    request = http.get_json(api_url.format(video_id))

    if request.get('error'):
        return
    data = request['data']

    if not data.get('duration'):
        return

    length = data['duration']
    views = data['viewCount']
    total = int(length * views)

    length_text = timeformat.format_time(length, simple=True)
    total_text = timeformat.format_time(total, accuracy=8)

    return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
           'a total run time of {}!'.format(data['title'], length_text, views, total_text)
def imdb(text):
    """imdb <movie> -- Gets information about <movie> from IMDb."""
    strip = text.strip()

    if id_re.match(strip):
        content = http.get_json("http://www.omdbapi.com/", i=strip)
    else:
        content = http.get_json("http://www.omdbapi.com/", t=strip)

    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/{}'.format(content['imdbID'])

        out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
        if content['Runtime'] != 'N/A':
            out += ' \x02%(Runtime)s\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02 votes.'
        out += ' %(URL)s'
        return out % content
    else:
        return 'Unknown error.'
def rottentomatoes(inp, bot=None):
    '.rt <title> -- gets ratings for <title> from Rotten Tomatoes'
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return "error: no api key set"

    results = http.get_json(movie_search_url, q=inp, apikey=api_key)
    if results['total'] == 0:
        return 'no results'

    movie = results['movies'][0]
    title = movie['title']
    movie_id = movie['id']
    critics_score = movie['ratings']['critics_score']
    audience_score = movie['ratings']['audience_score']
    url = movie['links']['alternate']

    if critics_score == -1:
        return

    reviews = http.get_json(movie_reviews_url % movie_id, apikey=api_key, review_type='all')
    review_count = reviews['total']

    fresh = critics_score * review_count / 100
    rotten = review_count - fresh

    return u"%s - critics: \x02%d%%\x02 (%d\u2191/%d\u2193) audience: \x02%d%%\x02 - %s" % (
        title, critics_score, fresh, rotten, audience_score, url)
def rt(inp, api_key=None):
    """.rt <title> - Gets ratings for <title> from Rotten Tomatoes."""
    results = http.get_json(movie_search_url, q=inp, apikey=api_key)
    if results['total'] == 0:
        return 'No results.'

    movie = results['movies'][0]
    title = movie['title']
    movie_id = movie['id']
    critics_score = movie['ratings']['critics_score']
    audience_score = movie['ratings']['audience_score']
    url = movie['links']['alternate']

    if critics_score == -1:
        return

    reviews = http.get_json(movie_reviews_url % movie_id, apikey=api_key, review_type='all')
    review_count = reviews['total']

    fresh = critics_score * review_count / 100
    rotten = review_count - fresh

    return u"%s - critics: \x02%s%%\x02 (%s\u2191%s\u2193)" \
           " audience: \x02%s%%\x02 - %s" % (title.strip(), str(critics_score).strip(),
                                             str(fresh).strip(), str(rotten).strip(),
                                             str(audience_score).strip(' '), url.strip(' '))
def youtime(inp, bot=None):
    """youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
    key = bot.config.get("api_keys", {}).get("youtube")
    request = http.get_json(search_api_url, key=key, q=inp, type='video')

    if 'error' in request:
        return 'Error performing search.'

    if request['pageInfo']['totalResults'] == 0:
        return 'No results found.'

    video_id = request['items'][0]['id']['videoId']
    request = http.get_json(api_url, key=key, id=video_id)
    data = request['items'][0]

    # Parse the ISO 8601 duration (e.g. "PT1H2M3S"), tolerating missing
    # hour/minute/second components.
    match = re.match(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?',
                     data['contentDetails']['duration'])
    hours, minutes, secs = (int(g or 0) for g in match.groups())
    seconds = 3600 * hours + 60 * minutes + secs

    views = int(data['statistics']['viewCount'])
    total = int(seconds * views)

    length_text = timeformat.format_time(seconds, simple=True)
    total_text = timeformat.format_time(total, accuracy=8)

    return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
           'a total run time of {}!'.format(data['snippet']['title'], length_text,
                                            views, total_text)
def frink(inp):
    ".frink <query> -- searches Frinkiac for Simpsons quotes"
    steamed_hams = http.get_json(frinkiac_search_url, q=inp)
    if not steamed_hams:
        return "no cromulent quotes found"

    if len(steamed_hams) > 10:
        steamed_hams = steamed_hams[:10]

    SKIIIINNER = random.choice(steamed_hams)
    episode = SKIIIINNER['Episode']
    timestamp = SKIIIINNER['Timestamp']

    aurora_borealis = http.get_json(frinkiac_caption_url, e=episode, t=timestamp)

    leader_beans = []
    for skinner_excuse in aurora_borealis['Subtitles']:
        leader_beans.append(skinner_excuse['Content'])
    ah_superintendent_chalmers = " ".join(leader_beans)

    if len(ah_superintendent_chalmers) > 250:
        ah_superintendent_chalmers = ah_superintendent_chalmers[:250] + "..."

    what_a_pleasant_surprise = frinkiac_url + 'caption/%s/%s' % (episode, timestamp)
    return "\"%s\" %s" % (ah_superintendent_chalmers, what_a_pleasant_surprise)
def rottentomatoes(inp, bot=None):
    '.rt <title> -- gets ratings for <title> from Rotten Tomatoes'
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return None

    title = inp.strip()
    results = http.get_json(movie_search_url % (http.quote_plus(title), api_key))
    if results['total'] > 0:
        movie = results['movies'][0]
        title = movie['title']
        movie_id = movie['id']
        critics_score = movie['ratings']['critics_score']
        audience_score = movie['ratings']['audience_score']
        url = movie['links']['alternate']

        if critics_score != -1:
            reviews = http.get_json(movie_reviews_url % (movie_id, api_key))
            review_count = reviews['total']

            fresh = critics_score * review_count / 100
            rotten = review_count - fresh

            return response % (title, critics_score, fresh, rotten, audience_score, url)
def rottentomatoes(inp, bot=None):
    ".rt <title> -- gets ratings for <title> from Rotten Tomatoes"
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return "error: no api key set"

    results = http.get_json(movie_search_url, q=inp, apikey=api_key)
    if results["total"] == 0:
        return "no results"

    movie = results["movies"][0]
    title = movie["title"]
    movie_id = movie["id"]
    critics_score = movie["ratings"]["critics_score"]
    audience_score = movie["ratings"]["audience_score"]
    url = movie["links"]["alternate"]

    if critics_score == -1:
        return

    reviews = http.get_json(movie_reviews_url % movie_id, apikey=api_key, review_type="all")
    review_count = reviews["total"]

    fresh = critics_score * review_count / 100
    rotten = review_count - fresh

    return u"%s - critics: \x02%d%%\x02 (%d\u2191/%d\u2193) audience: \x02%d%%\x02 - %s" % (
        title, critics_score, fresh, rotten, audience_score, url)
def urban(text):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""
    if text:
        # clean and split the input
        text = text.lower().strip()
        parts = text.split()

        # if the last word is a number, set the ID to that number
        if parts[-1].isdigit():
            id_num = int(parts[-1])
            # remove the ID from the input string
            del parts[-1]
            text = " ".join(parts)
        else:
            id_num = 1

        # fetch the definitions
        page = http.get_json(define_url, term=text, referer="http://m.urbandictionary.com")

        if page['result_type'] == 'no_results':
            return 'Not found.'
    else:
        # get a random definition!
        page = http.get_json(random_url, referer="http://m.urbandictionary.com")
        id_num = None

    definitions = page['list']

    if id_num:
        # try getting the requested definition
        try:
            definition = definitions[id_num - 1]

            def_text = " ".join(definition['definition'].split())  # remove excess spaces
            def_text = formatting.truncate_str(def_text, 200)
        except IndexError:
            return 'Not found.'

        url = definition['permalink']
        output = "[{}/{}] {} :: {}".format(id_num, len(definitions), def_text, url)
    else:
        definition = random.choice(definitions)

        def_text = " ".join(definition['definition'].split())  # remove excess spaces
        def_text = formatting.truncate_str(def_text, 200)

        name = definition['word']
        url = definition['permalink']
        output = "\x02{}\x02: {} :: {}".format(name, def_text, url)

    return output
def stock(inp):
    '''.stock <symbol> [info] -- retrieves a week's worth of stats for given symbol.
    Optionally displays information about the company.'''
    arguments = inp.split(' ')
    symbol = arguments[0].upper()

    try:
        fundamentals = http.get_json(
            'https://api.robinhood.com/fundamentals/{}/'.format(symbol))
        quote = http.get_json(
            'https://api.robinhood.com/quotes/{}/'.format(symbol))
    except http.HTTPError:
        return '{} is not a valid stock symbol.'.format(symbol)

    if fundamentals['open'] is None or quote['ask_price'] is None:
        return 'unknown ticker symbol %s' % inp

    if len(arguments) > 1 and arguments[1] == 'info':
        return fundamentals['description']

    # Manually "calculate" change since API does not provide it
    price = float(quote['last_trade_price'])
    change = price - float(quote['adjusted_previous_close'])

    # Extract name as Upper Case Corp Name from description.
    name = ''
    m = re.match(r'^([A-Z]\S* )*', fundamentals['description'])
    if m:
        name = m.group(0)

    def maybe(name, key, fmt=human_price):
        if fundamentals.get(key):
            return ' | {0}: {1}'.format(name, fmt(float(fundamentals[key])))
        return ''

    response = {
        'name': name,
        'change': change,
        'percent_change': 100 * change / (price - change),
        'symbol': quote['symbol'],
        'price': price,
        'color': '5' if change < 0 else '3',
        'high': float(fundamentals['high']),
        'low': float(fundamentals['low']),
        'average_volume': maybe('Volume', 'average_volume'),
        'market_cap': maybe('MCAP', 'market_cap'),
        'pe_ratio': maybe('P/E', 'pe_ratio', fmt='{:.2f}'.format),
    }

    return ("{name}({symbol}) ${price:,.2f} \x03{color}{change:,.2f} ({percent_change:,.2f}%)\x03 | "
            "Day Range: ${low:,.2f} - ${high:,.2f}"
            "{pe_ratio}{average_volume}{market_cap}").format(**response)
def hats(inp, api_key=None):
    """.hats <Steam Vanity URL|Numeric Steam ID> - Shows backpack information for TF2."""
    # Get SteamID
    if inp.isdigit():
        steamid64 = inp
    else:
        try:
            id_url = 'http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key=%s&vanityurl=%s' % \
                     (api_key, http.quote(inp.encode('utf8'), safe=''))
            steamid64 = http.get_json(id_url)['response']['steamid']
        except:
            return "Error getting numeric Steam ID, please try format '.hats <Numeric Steam ID>'"

    # Get Steam User's TF2 Inventory/Check for User
    try:
        inv_url = 'http://api.steampowered.com/IEconItems_440/GetPlayerItems/v0001/?SteamID=%s&key=%s' % \
                  (steamid64, api_key)
        inv = http.get_json(inv_url)
    except:
        return "Sorry, I couldn't find '%s''s Steam inventory." % inp

    # Count Items into Categories
    total, dropped, dhats, dun, un, hats = 0, 0, 0, 0, 0, 0
    for x in inv["result"]["items"]:
        total += 1
        ind = int(x['defindex'])
        if x['origin'] == 0:
            if x['quality'] == 5:
                dun += 1
            if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
                dhats += 1
            else:
                dropped += 1
        else:
            if x['quality'] == 5:
                un += 1
            if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
                hats += 1

    # Get Market Price for Backpack
    try:
        backpack_url = 'http://backpack.tf/api/IGetUsers/v3/?steamids=%s' % steamid64
        backpack = http.get_json(backpack_url)
        ref = backpack['response']['players'][steamid64]['backpack_value']['440']
    except:
        ref = '???'

    return '%s has %s items, %s hats, and %s unusuals (%s/%s/%s of the ' \
           'items/hats/unusuals were from drops) and has a backpack worth %s ref' % \
           (inp, total, hats + dhats, un + dun, dropped, dhats, dun, ref)
def steamcalc(inp, say='', api_key=None):
    """.steamcalc <Steam Vanity URL ID> - Gets Steam account value for a given Vanity ID.
    Uses steamcommunity.com/id/<nickname>."""
    # Get SteamID
    try:
        steamid = http.get_json(user_url % (api_key, http.quote(inp.encode('utf8'), safe='')))['response']['steamid']
    except:
        return "'%s' does not appear to be a valid Vanity ID. Uses steamcommunity.com/id/<VanityID>." % inp

    # Get Steam profile info
    try:
        profile = http.get_json(profile_url % (api_key, steamid))['response']['players'][0]
        persona = profile['personaname']
    except:
        return "Error looking up %s's Steam profile." % inp

    # Get Steam account games for User
    try:
        account = http.get_json(account_url % (api_key, steamid))['response']['games']
        games = [str(item['appid']) for item in account]
    except:
        return "Error looking up %s's Steam inventory." % inp

    # Get info for games, 20 appids per request
    say("Collecting data for %s, please wait..." % inp)
    games_info = {}
    try:
        while games:
            games_temp, games = games[:20], games[20:]
            gurl = games_url % ','.join(games_temp)
            games_info = dict(games_info.items() + http.get_json(gurl).items())
    except:
        return "Error looking up game data, please try again later."

    # Aggregate Steam data
    prices = []
    scores = []
    for game in games_info:
        try:
            prices.append(games_info[game]['data']['price_overview']['final'])
            scores.append(games_info[game]['data']['metacritic']['score'])
        except:
            pass

    prices = [int(price) / 100. for price in prices]
    scores = [float(score) for score in scores]

    total_price = "{0:.2f}".format(sum(prices))
    avg_score = "{0:.1f}".format(sum(scores) / len(scores))

    say("{} has {} games with a total value of ${} and an average metascore of {}".format(
        persona, len(games_info), total_price, avg_score))
def cube(inp):
    up = '\x0309up'
    down = '\x0304down'

    status = http.get_json('http://direct.cyberkitsune.net/canibuycubeworld/status.json')
    stats = http.get_json('http://direct.cyberkitsune.net/canibuycubeworld/stats.json')

    def pct(key):
        # percentage of update checks in which the component was up, to 2 decimals
        return str(round((float(stats[key]) / stats['updatecount']) * 100 * 100) / 100) + '%'

    siteup = pct('siteup')
    regup = pct('regup')
    shopup = pct('shopup')

    out = 'Picroma is ' + (up if status['site'] else down) + ' \x03(%s)' % siteup
    out += ' | Registration is ' + (up if status['reg'] else down) + ' \x03(%s)' % regup
    out += ' | Shop is ' + (up if status['shop'] else down) + ' \x03(%s)' % shopup
    return out
def api_get(kind, query):
    """Use the RESTful Google Search API."""
    if kind == 'image':
        url = ('https://www.googleapis.com/customsearch/v1?key={}&cx={}'
               '&searchType={}&num=1&safe=off&q={}')
        return http.get_json(url.format(query[0], query[1], kind, query[2]))
    elif kind == 'images':
        url = ('https://www.googleapis.com/customsearch/v1?key={}&cx={}'
               '&searchType={}&num=1&safe=off&q={}&fileType="{}"')
        return http.get_json(url.format(query[0], query[1], 'image', query[2], query[3]))
    else:
        url = ('https://www.googleapis.com/customsearch/v1?key={}&cx={}'
               '&num=1&safe=off&q={}')
        return http.get_json(url.format(query[0], query[1], query[2].encode('utf-8')))
def lastfm(inp, nick=None, api_key=None):
    ".lastfm [user1] [user2] -- gets Last.fm information about a single user or compares two users. " \
    "If user1 is blank then the user matching the current nickname will be returned."
    try:
        user1, user2 = inp.split(' ')
        return compare(user1, user2, api_key)
    except ValueError:
        user = inp

    if not inp:
        user = nick

    user_json = http.get_json(api_url, method='user.getinfo', user=user, api_key=api_key)
    try:  # check if user exists
        return user_json['message'].replace('that name', 'the name ' + user)
    except:
        pass

    user_info = user_json['user']
    output = user_info['url'] + ' | ' + user_info['playcount'] + ' plays'

    if user_info['playcount'] != '0':  # keyerror with zero plays
        output += ' | Top artists: '
        top_artists = http.get_json(api_url, method='user.gettopartists', user=user,
                                    api_key=api_key)['topartists']
        count = int(top_artists['@attr']['total'])
        top_artists = top_artists['artist']
        if count == 0:  # arnie is a dick and had only two artists and tracks
            output += 'none'
        elif count > 4:
            count = 3
        for i in range(0, count):
            output += top_artists[i]['name'] + ' (' + top_artists[i]['playcount'] + ')'
            if i < (count - 1):
                output += ', '

        output += ' | Top tracks: '
        top_tracks = http.get_json(api_url, method='user.gettoptracks', user=user,
                                   api_key=api_key)['toptracks']
        count = int(top_tracks['@attr']['total'])
        top_tracks = top_tracks['track']
        if count == 0:
            output += 'none'
        elif count > 4:
            count = 3
        for i in range(0, count):
            output += top_tracks[i]['artist']['name'] + ' - ' + top_tracks[i]['name'] + \
                      ' (' + top_tracks[i]['playcount'] + ')'
            if i < (count - 1):
                output += ', '

    return output
def hyle(inp, say=None):
    subreddit = [
        "conspiracy",
        "twinpeaks",
        "mensrights",
        "crime",
    ]
    if random.random() > 0.075:
        jsonData = http.get_json('http://www.reddit.com/r/' + random.choice(subreddit) + '/.json')
        say('<hyle> ' + random.choice(jsonData['data']['children'])['data']['title'].lower())
    else:
        jsonData = http.get_json('http://www.reddit.com/r/ass.json')
        say('<hyle> ' + random.choice(jsonData['data']['children'])['data']['url'])
        say('<hyle> ass like that')
def xkcd_info(xkcd_id, url=False):
    """ takes an XKCD entry ID and returns a formatted string """
    data = http.get_json("http://www.xkcd.com/" + xkcd_id + "/info.0.json")
    date = "{} {} {}".format(data['day'], months[int(data['month'])], data['year'])
    if url:
        url = " | http://xkcd.com/" + xkcd_id.replace("/", "")
    return "xkcd: \x02{}\x02 ({}){}".format(data['title'], date, url if url else "")
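# Illustrative only: a quick sanity check of xkcd_info(); assumes the module-level
# `months` lookup and the `http` helper are in scope. Comic 353 ("Python", Dec 2007)
# is real, but the commented output below is the expected shape, not verified output.
if __name__ == '__main__':
    print(xkcd_info("353", url=True))
    # -> xkcd: \x02Python\x02 (<day> December 2007) | http://xkcd.com/353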
def get_video_description(vid_id, api_key):
    j = http.get_json(info_url, id=vid_id, key=api_key)

    if not j['pageInfo']['totalResults']:
        return

    j = j['items'][0]

    duration = j['contentDetails']['duration'].replace('PT', '').lower()
    published = time.strptime(j['snippet']['publishedAt'], "%Y-%m-%dT%H:%M:%S.000Z")
    published = time.strftime("%Y.%m.%d", published)
    views = group_int_digits(j['statistics']['viewCount'], ',')

    out = (u'\x02{snippet[title]}\x02 - length \x02{duration}\x02 - '
           u'{statistics[likeCount]}\u2191{statistics[dislikeCount]}\u2193 - '
           u'\x02{views}\x02 views - '
           u'\x02{snippet[channelTitle]}\x02 on \x02{published}\x02'
           ).format(duration=duration, views=views, published=published, **j)

    # TODO: figure out how to detect NSFW videos
    return out
def goog_trans(text, slang, tlang):
    url = 'http://ajax.googleapis.com/ajax/services/language/translate?v=1.0'
    parsed = http.get_json(url, q=text, langpair=(slang + '|' + tlang))
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error with the translation server: %d: %s' % (
            parsed['responseStatus'], ''))
    return unescape(parsed['responseData']['translatedText'])
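# Hypothetical usage of goog_trans(). The langpair is "source|target" in ISO 639-1
# codes; note the v1.0 AJAX Language API queried here was retired by Google long ago,
# so this sketch documents the call shape rather than a working request.
def example_goog_trans():
    return goog_trans("bonjour le monde", "fr", "en")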
def get_video_description(vid_id):
    j = http.get_json(url % vid_id)

    if j.get('error'):
        return

    j = j['data']

    out = '\x02%s\x02' % j['title']

    out += ' - length \x02'
    length = j['duration']
    if length / 3600:  # > 1 hour
        out += '%dh ' % (length / 3600)
    if length / 60:
        out += '%dm ' % (length / 60 % 60)
    out += "%ds\x02" % (length % 60)

    if 'rating' in j:
        out += ' - rated \x02%.2f/5.0\x02 (%d)' % (j['rating'], j['ratingCount'])

    if 'viewCount' in j:
        out += ' - \x02%s\x02 views' % locale.format('%d', j['viewCount'], 1)

    upload_time = time.strptime(j['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
    out += ' - \x02%s\x02 on \x02%s\x02' % (j['uploader'],
                                            time.strftime("%Y.%m.%d", upload_time))

    if 'contentRating' in j:
        out += ' - \x034NSFW\x02'

    return out
def convert_ltc(inp, conn=None, chan=None):
    inp = inp.lower().replace(',', '').split()
    inp_amount = inp[0]
    amount = inp_amount

    # get ltc price
    data = http.get_json("https://btc-e.com/api/2/ltc_usd/ticker")
    data = data['ticker']
    ticker = {'buy': data['buy']}
    ltc_price = ("%(buy)s" % ticker).replace('$', '')

    if 'ltc' in inp[3]:
        currency = inp[1]
        if 'usd' not in currency:
            amount = convert('%s %s to usd' % (amount, currency)).split('=')[1].split()[0]
        result = (float(amount) / float(ltc_price))
    elif 'ltc' in inp[1]:
        currency = inp[3]
        if 'usd' not in currency:
            conversion_rate = (float(convert('10000 usd to %s' % currency).split('=')[1].split()[0]) / 10000)
            result = ((float(conversion_rate) * float(ltc_price)) * float(amount))
        else:
            result = (float(amount) * float(ltc_price))

    message = '%s %s = %s %s' % ('{:20,.2f}'.format(float(amount)).strip(), inp[1],
                                 '{:20,.2f}'.format(float(result)).strip(), inp[3])
    out = "PRIVMSG %s :%s" % (chan, message)
    conn.send(out)
def bit2Ouya(inp, say=None):
    ".bitcoin -- gets current exchange rate for bitcoins from mtgox"
    data = http.get_json("https://data.mtgox.com/api/2/BTCUSD/money/ticker")
    data = data['data']
    buy = data['buy']['value']
    ouya = float(buy[0:3]) / 99.99  # an OUYA console cost $99.99
    say("Buttcoins currently selling for: " + str(ouya) + " OUYAs!")
def hitze(inp):
    hitzelist = [
        "ahahaaha",
        "lol",
        "heh",
        "omg.",
        "uugh",
        "why..",
        "lol pcgaming",
        "rip",
        "sperg",
        "omg hyle",
    ]
    subreddit = [
        "pics",
        "wtf",
        "cityporn",
        "gaming",
        "minecraftcirclejerk",
        "gifs",
        "nba",
    ]

    # keep drawing until we get a link that isn't a reddit self-post
    noSelf = False
    while not noSelf:
        jsonData = http.get_json('http://www.reddit.com/r/' + random.choice(subreddit) + '/.json')
        potentialURL = random.choice(jsonData['data']['children'])['data']['url']
        noSelf = 'reddit' not in potentialURL

    return "<hitz> " + potentialURL + " " + random.choice(hitzelist)
def vimeo_url(match):
    info = http.get_json('http://vimeo.com/api/v2/video/%s.json' % match.group(1))
    if info:
        return ("\x02%(title)s\x02 - length \x02%(duration)ss\x02" % info[0])
def doge(inp, say=None):
    ".doge -- Returns the value of a dogecoin."
    try:
        # get btc price
        bitcoin_price = re.search(r'\d+\.\d+', bitcoin('coinbase')).group(0)
        # get doge->btc price
        url = "https://www.coins-e.com/api/v2/markets/data/"
        data = http.get_json(url)
        data = data['markets']
        data = data['DOGE_BTC']
        data = data['marketstat']
        current = {'buy': data['ltp']}
        data = data['24h']
        average = {
            'volume': data['volume'],
            'high': data['h'],
            'avg': data['avg_rate'],
            'low': data['l'],
        }
    except:
        return 'Error: Doge is worthless.'

    result = float(bitcoin_price) * float(current['buy'])
    dollar_result = 1 / float(result)
    lotsadoge = 10000 * result

    result = ("Price: \x0307$%s\x0f - $1=\x0307%s\x0f Doge - 10,000 DOGE=\x0307$%s\x0f - BTC: \x0307%s\x0f"
              % (result, dollar_result, lotsadoge, current['buy']))
    result2 = ("Average: \x0307%(avg)s\x0f - High: \x0307%(high)s\x0f - Low: \x0307%(low)s\x0f" % average)
    say("{} - {}".format(result, result2))
def get_location(location):
    location_data = http.get_json(
        "http://maps.googleapis.com/maps/api/geocode/json?address={}&sensor=false".format(location)
    )["results"][0]
    location_name = location_data["formatted_address"]
    location_latlong = [location_data["geometry"]["location"]["lat"],
                        location_data["geometry"]["location"]["lng"]]
    return (location_name, location_latlong)
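# Illustrative caller for get_location(); the keyless geocoder endpoint above no
# longer answers without an API key, so treat this as documentation of the return
# shape (name, [lat, lng]) rather than a guaranteed-working example.
def example_get_location():
    name, (lat, lng) = get_location("Portland, OR")
    return "{} is at {:.4f}, {:.4f}".format(name, lat, lng)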
def bancount(text):
    """bancount <user> -- Gets a count of <user>s minecraft bans from fishbans"""
    user = text.strip()

    try:
        request = http.get_json(api_url.format(quote_plus(user)))
    except (http.HTTPError, http.URLError) as e:
        return "Could not fetch ban data from the Fishbans API: {}".format(e)

    if not request["success"]:
        return "Could not fetch ban data for {}.".format(user)

    user_url = "http://fishbans.com/u/{}/".format(user)
    services = request["stats"]["service"]

    out = []
    for service, ban_count in list(services.items()):
        if ban_count != 0:
            out.append("{}: \x02{}\x02".format(service, ban_count))

    if not out:
        return "The user \x02{}\x02 has no bans - {}".format(user, user_url)
    else:
        return "Bans for \x02{}\x02: {} - {}".format(user, formatting.get_text_list(out, "and"),
                                                     user_url)
def gitlabquery(course, exercise, action, submission_dir):
    ''' Queries gitlab API to check repository properties. '''
    if "require_gitlab" not in exercise:
        raise ConfigError("This action needs require_gitlab in exercise.")
    if "token" not in action:
        raise ConfigError("Token missing from configuration for gitlab privacy check.")

    url = None
    err = ""
    try:
        with open(submission_dir + "/user/gitsource") as content:
            source = content.read()
        try:
            from urllib.parse import quote_plus
        except ImportError:
            from urllib import quote_plus
        rid = quote_plus(source[source.index(":") + 1:])
        url = "https://%s/api/v3/projects/%s?private_token=%s" % (
            exercise["require_gitlab"], rid, action["token"])
        data = get_json(url)
        if "private" in action and action["private"] and data["public"]:
            err = "%s has public access in settings! Remove it to grade exercises." % (data["web_url"])
        if "forks" in action:
            if "forked_from_project" not in data or \
                    data["forked_from_project"]["path_with_namespace"] != action["forks"]:
                err = "%s is not forked from %s." % (data["web_url"], action["forks"])
    except Exception:
        LOGGER.exception("Failed to check gitlab URL: %s", url)

    return {
        "points": 0,
        "max_points": 0,
        "out": "",
        "err": err,
        "stop": err != ""
    }
def catfacts(inp, nick='', db=None, bot=None, notice=None):
    response = http.get_json(api_url)
    fact = response["facts"][0]
    return u"{} Thank you for using catfacts. To stop this feature use the command " \
           u"'catfacts -disable'.".format(fact)
def youtube(inp, api_key=None):
    '.youtube <query> -- returns the first YouTube search result for <query>'
    params = {
        'key': api_key,
        'fields': 'items(id,snippet(channelId,title))',
        'part': 'snippet',
        'type': 'video',
        'q': inp
    }
    j = http.get_json(search_api_url, **params)

    if 'error' in j:
        return 'error while performing the search'

    results = j.get("items")
    if not results:
        return 'no results found'

    vid_id = j['items'][0]['id']['videoId']

    return get_video_description(vid_id, api_key) + " - " + video_url % vid_id
def suggest(inp, inp_unstripped=None):
    ".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
    if inp_unstripped is not None:
        inp = inp_unstripped
    m = re.match(r"^#(\d+) (.+)$", inp)
    num = 0
    if m:
        num, inp = m.groups()
        num = int(num)

    json = http.get_json("http://suggestqueries.google.com/complete/search",
                         client="firefox", q=inp)
    suggestions = json[1]
    if not suggestions:
        return "no suggestions found"

    if not num:
        num = random.randint(1, len(suggestions))
    if len(suggestions) + 1 <= num:
        return "only got %d suggestions" % len(suggestions)

    out = suggestions[num - 1]
    return "#%d: %s" % (num, out)
def stock(inp, api_key=None):
    """stock <symbol> - Looks up stock information"""
    params = {'function': 'GLOBAL_QUOTE', 'apikey': api_key, 'symbol': inp}
    quote = http.get_json(url, query_params=params)
    if not quote.get("Global Quote"):
        return "Unknown ticker symbol '{}'".format(inp)

    # keys arrive as e.g. "05. price"; keep only the last word and parse numbers
    quote = {k.split(' ')[-1]: tryParse(v) for k, v in quote['Global Quote'].items()}
    quote['url'] = web.try_googl('https://finance.yahoo.com/quote/' + inp)

    try:
        quote['color'] = "5" if float(quote['change']) < 0 else "3"
        return "{symbol} - ${price:.2f} " \
               "\x03{color}{change:+.2f} ({percent:.2f}%)\x0F " \
               "H:${high:.2f} L:${low:.2f} O:${open:.2f} " \
               "Volume:{volume:,.0f} - {url}".format(**quote)
    except:
        return "Error parsing return data, please try again later."
def get_video_description(vid_id):
    j = http.get_json(url % vid_id)

    if j.get('error'):
        return

    j = j['data']

    out = '\x02%s\x02' % j['title']

    if not j.get('duration'):
        return out

    out += ' - length \x02'
    length = j['duration']
    if length / 3600:  # > 1 hour
        out += '%dh ' % (length / 3600)
    if length / 60:
        out += '%dm ' % (length / 60 % 60)
    out += "%ds\x02" % (length % 60)

    if 'rating' in j:
        out += ' - rated \x02%.2f/5.0\x02 (%d)' % (j['rating'], j['ratingCount'])

    if 'viewCount' in j:
        out += ' - \x02%s\x02 views' % group_int_digits(j['viewCount'])

    upload_time = time.strptime(j['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
    out += ' - \x02%s\x02 on \x02%s\x02' % (
        j['uploader'], time.strftime("%Y.%m.%d", upload_time))

    if 'contentRating' in j:
        out += ' - \x034NSFW\x02'

    return out
def get_video_description(vid_id, api_key):
    j = http.get_json(INFO_URL, id=vid_id, key=api_key)

    if not j["pageInfo"]["totalResults"]:
        return

    j = j["items"][0]

    duration = j["contentDetails"]["duration"].replace("PT", "").lower()

    published = j["snippet"]["publishedAt"].replace(".000Z", "Z")
    published = time.strptime(published, "%Y-%m-%dT%H:%M:%SZ")
    published = time.strftime("%Y.%m.%d", published)

    views = group_int_digits(j["statistics"]["viewCount"], ",")

    likes = j["statistics"].get("likeCount", 0)
    dislikes = j["statistics"].get("dislikeCount", 0)

    title = j["snippet"]["title"]
    if "localized" in j["snippet"]:
        title = j["snippet"]["localized"].get("title") or title

    out = ("\x02{title}\x02 - length \x02{duration}\x02 - "
           "{likes}\u2191{dislikes}\u2193 - "
           "\x02{views}\x02 views - "
           "\x02{snippet[channelTitle]}\x02 on \x02{published}\x02").format(
               duration=duration, likes=likes, dislikes=dislikes,
               views=views, published=published, title=title, **j)

    # TODO: figure out how to detect NSFW videos
    return out
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [user] [dontsave] -- Displays the now playing (or last played)
    track of LastFM user [user]. Other commands are: .gi .genre .profile .top .b .compare"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # check if the user asked us not to save his details
    dontsave = inp.endswith(" dontsave")
    if dontsave:
        user = inp[:-9].strip().lower()
    else:
        user = inp

    db.execute("create table if not exists lastfm(nick primary key, acc)")

    if not user:
        user = db.execute("select acc from lastfm where nick=lower(?)",
                          (nick,)).fetchone()
        if not user:
            notice(lastfm.__doc__)
            return
        user = user[0]

    response = http.get_json(api_url, method="user.getrecenttracks",
                             api_key=api_key, user=user, limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    if not "track" in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = u'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = u'last listened to'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = u' ({} ago)'.format(time_since)
    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]
    link = track["url"]
    linkshort = web.isgd(link)

    title2 = unicode(title)
    artist2 = unicode(artist)
    response2 = http.get_json(api_url, method="track.getinfo", api_key=api_key,
                              track=title2, artist=artist2, username=user, autocorrect=1)
    trackdetails = response2["track"]
    if type(trackdetails) == list:
        track2 = trackdetails[0]
    elif type(trackdetails) == dict:
        track2 = trackdetails

    if "userplaycount" in trackdetails:
        playcounts = trackdetails["userplaycount"]
    else:
        playcounts = 0

    # scrape up to three genre tags out of the repr of the toptags structure
    if "tag" in track2["toptags"]:
        genres1 = track2["toptags"]["tag"]
        genresstr = str(genres1)
        # First genre
        genres3 = genresstr.split("u'name': u'", 1)[1]
        genres4 = genres3.split("'", 1)[0]
        genres = genres4
    else:
        genres = "(No tags found)"
    try:
        # Second genre
        genres5 = genres3.split("u'name': u'", 1)[1]
        genres6 = genres5.split("'", 1)[0]
        genres = genres4, genres6
        genres = str(genres)
        genres = genres.replace("'", "")
    except UnboundLocalError:
        genres = "(No tags found)"
    except IndexError:
        genres = '({})'.format(genres)
    try:
        # Third genre
        genres7 = genres5.split("u'name': u'", 1)[1]
        genres8 = genres7.split("'", 1)[0]
        genres = genres4, genres6, genres8
        genres = str(genres)
        genres = genres.replace("'", "")
    except UnboundLocalError:
        genres = '{}'.format(genres)
    except IndexError:
        genres = '{}'.format(genres)

    length1 = track2["duration"]
    lengthsec = float(length1) / 1000
    length = time.strftime('%H:%M:%S', time.gmtime(lengthsec))
    length = length.lstrip("0:")
    if len(length) == 2:
        length = '0:' + length
    elif len(length) == 1:
        length = '0:0' + length

    out = u'{} {} "{}"'.format(user, status, title)
    if artist:
        out += u' by \x02{}\x0f'.format(artist)
    if album:
        out += u' from the album \x02{}\x0f'.format(album)
    if length:
        out += u' [{}]'.format(length)
    if playcounts:
        out += u' [plays: {}]'.format(playcounts)
    if playcounts == 0:
        out += u' [plays: {}]'.format(playcounts)
    if genres:
        out += u' {}'.format(genres)
    if linkshort:
        out += u' ({})'.format(linkshort)

    # append ending based on what type it was
    out += ending

    if inp and not dontsave:
        db.execute("insert or replace into lastfm(nick, acc) values (?,?)",
                   (nick.lower(), user))
        db.commit()

    return out
def profile(inp, nick='', db=None, bot=None, notice=None):
    """profile -- Displays information for selected profile from last.fm db."""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    fetchprof = db.execute("select acc from lastfm where nick=lower(?)",
                           (inp,)).fetchone()
    fetchprof = fetchprof[0] if fetchprof else inp

    response = http.get_json(api_url, method="user.getinfo", api_key=api_key,
                             user=fetchprof)
    if 'error' in response:
        return "Error: {}.".format(response["message"])

    userprof = response["user"]
    username = userprof["name"]
    userreal = userprof["realname"].strip(' ')
    usercntry = userprof["country"]
    userage = userprof["age"]
    userplays = userprof["playcount"]
    usergendr = userprof["gender"]
    usertime = userprof["registered"]["#text"]
    userurl = userprof["url"]
    urlshort = web.isgd(userurl)

    # If name not given, outputs 'Unknown'
    if userreal == '':
        userreal = "Unknown"

    # Converts country code to word
    if str(usercntry) == "NZ":
        usercntry = "New Zealand"
    elif str(usercntry) == "CA":
        usercntry = "Canada"
    elif str(usercntry) == "US":
        usercntry = "the United States"
    elif str(usercntry) == "UK":
        usercntry = "the United Kingdom"
    elif str(usercntry) == "AU":
        usercntry = "Australia"

    # Converts gender to word
    if str(usergendr) == "m":
        usergendr = "man"
    elif str(usergendr) == "f":
        usergendr = "female"
    else:
        usergendr = "unknown gender"

    # Account age
    date_method = '%Y-%m-%d %H:%M'
    regstrdate = datetime.strptime(usertime, date_method)
    todaysdate = datetime.now()
    accage = (todaysdate - regstrdate).days
    accageyrs = int(accage / 365.25)
    accagemnths = int(accage / 30.33)
    if accagemnths > 12:
        accagemnths = int(accagemnths % 12)
    accage = int(accage % 30.33)

    out = ''
    if username:
        out += u'\x02Last.fm profile for {}:\x0f'.format(username)
    if userreal:
        out += u' {}'.format(userreal)
    if usergendr:
        out += u' is a {}'.format(usergendr)
    if usercntry:
        out += u' from {}'.format(usercntry)
    if userage:
        out += u' aged {}'.format(userage)
    if userplays:
        out += u' who has played {} songs'.format(userplays)
    if accageyrs > 1:
        out += u' in {} years and {} months.'.format(accageyrs, accagemnths)
    else:
        out += u' in {} months and {} days.'.format(accagemnths, accage)
    if urlshort:
        out += u' ({}).'.format(urlshort)
    return out
def bitcoin(inp, say=None):
    ".bitcoin -- gets current exchange rate for bitcoins from BTC-e"
    data = http.get_json("https://btc-e.com/api/2/btc_usd/ticker")
    say("USD/BTC: \x0307{buy:.0f}\x0f - High: \x0307{high:.0f}\x0f"
        " - Low: \x0307{low:.0f}\x0f - Volume: {vol_cur:.0f}".format(**data['ticker']))
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [user] [dontsave] -- Displays the now playing (or last played)
    track of LastFM user [user]."""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # check if the user asked us not to save his details
    dontsave = inp.endswith(" dontsave")
    if dontsave:
        user = inp[:-9].strip().lower()
    else:
        user = inp

    db.execute("create table if not exists lastfm(nick primary key, acc)")

    if not user:
        user = db.execute("select acc from lastfm where nick=lower(?)",
                          (nick,)).fetchone()
        if not user:
            notice(lastfm.__doc__)
            return
        user = user[0]

    response = http.get_json(api_url, method="user.getrecenttracks",
                             api_key=api_key, user=user, limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    if not "track" in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = u'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = u'last listened to'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = u' ({} ago)'.format(time_since)
    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]
    link = track["url"]
    try:
        link = web.isgd(link)
    except:
        print "Error shortening link"

    title2 = unicode(title)
    artist2 = unicode(artist)
    response2 = http.get_json(api_url, method="track.getinfo", api_key=api_key,
                              track=title2, artist=artist2, username=user, autocorrect=1)
    trackdetails = response2["track"]
    if type(trackdetails) == list:
        track2 = trackdetails[0]
    elif type(trackdetails) == dict:
        track2 = trackdetails

    if "userplaycount" in trackdetails:
        playcounts = trackdetails["userplaycount"]
    else:
        playcounts = 0

    # build a "(tag1, tag2, tag3)" string from the artist's top tags
    toptags = http.get_json(api_url, method="artist.gettoptags", api_key=api_key,
                            artist=artist)
    genreList = []
    genres = "("
    if "tag" in toptags["toptags"]:
        for i, tag in enumerate(toptags["toptags"]["tag"]):
            genreList.append(tag["name"])
            if i == 2:
                break
        for singleGenre in genreList:
            if singleGenre == genreList[-1]:
                genres += u"{}".format(singleGenre)
                genres += ")"
            else:
                genres += u"{}, ".format(singleGenre)
    else:
        genres = "(No tags)"

    length1 = track2["duration"]
    lengthsec = float(length1) / 1000
    length = time.strftime('%M:%S', time.gmtime(lengthsec))
    length = length.lstrip("0")

    out = u'{} {} "{}"'.format(user, status, title)
    if artist:
        out += u' by \x02{}\x0f'.format(artist)
    if album:
        out += u' from the album \x02{}\x0f'.format(album)
    if length:
        out += u' [{}]'.format(length)
    if playcounts:
        out += u' [plays: {}]'.format(playcounts)
    if playcounts == 0:
        out += u' [plays: {}]'.format(playcounts)
    if genres:
        out += u' {}'.format(genres)

    # append ending based on what type it was
    out += ending

    if inp and not dontsave:
        db.execute("insert or replace into lastfm(nick, acc) values (?,?)",
                   (nick.lower(), user))
        db.commit()

    return out
def api_get(query, key, is_image=None, num=1):
    url = ("https://www.googleapis.com/customsearch/v1?cx=007629729846476161907:ud5nlxktgcw"
           "&fields=items(title,link,snippet)&safe=off&nfpr=1"
           + ("&searchType=image" if is_image else ""))
    return http.get_json(url, key=key, q=query, num=num)
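# Hypothetical helper built on api_get(), showing the response shape requested by
# the `fields` filter above: a list under "items" with title/link/snippet per result.
def first_result_link(query, key):
    results = api_get(query, key).get('items', [])
    return results[0]['link'] if results else None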
def get_token(api_key):
    return http.get_json("https://api.thetvdb.com/login",
                         headers={'Content-Type': 'application/json'},
                         get_method='POST',
                         post_data=dumps(api_key))['token']
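# Sketch of how get_token() is presumably called: TheTVDB's v2 /login endpoint takes
# a JSON body like {"apikey": "..."} and returns a JWT to send as a Bearer token.
# The config path below is an assumption, not taken from this codebase.
def example_tvdb_headers(config):
    token = get_token({"apikey": config["api_keys"]["tvdb"]})
    return {"Authorization": "Bearer " + token}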
def newegg_url(match):
    item_id = match.group(1)
    item = http.get_json(API_PRODUCT.format(item_id))
    return format_item(item, show_url=False)
def steam(inp, url_pasted=None, chan='', nick='', reply=None, api_key=None, db=None):
    ".steam -- gets Steam profile info for <user>"
    db.execute(
        "create table if not exists "
        "steam(chan, nick, user, primary key(chan, nick))"
    )

    if inp[0:1] == '@':
        nick = inp[1:].strip()
        user = None
        dontsave = True
    else:
        user = inp
        dontsave = user.endswith(" dontsave")
        if dontsave:
            user = user[:-9].strip().lower()

    if not user:
        user = db.execute(
            "select user from steam where chan=? and nick=lower(?)",
            (chan, nick)).fetchone()
        if not user:
            return steam.__doc__
        user = user[0]

    getid_url = "http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/"
    steamid = http.get_json(getid_url, key=api_key, vanityurl=user)
    if steamid["response"]["success"] == 42:  # fail response
        return "error: " + str(steamid["response"]["success"]) + \
               " (" + steamid["response"]["message"] + ")"

    request_url = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/"
    steamapi = http.get_json(request_url, key=api_key,
                             steamids=steamid["response"]["steamid"])
    response = steamapi["response"]["players"][0]

    status = {0: "\x0304Offline\x03",
              1: "\x0303Online\x03",
              2: "\x0308Busy\x03",
              3: "\x0308Away\x03",
              4: "\x0308Snooze\x03",
              5: "\x0303looking to trade\x03",
              6: "\x0303looking to play\x03"}

    ret = "\x02" + response["personaname"] + "\x0f [" + status[response["profilestate"]] + "]"
    if "gameid" not in response:
        ret += " | Not playing anything."
    else:
        ret += " | Playing \x02" + response["gameextrainfo"] + "\x02"
    if url_pasted is None:
        ret += " | " + profile_url + user

    # save the nick/user mapping before returning the result
    if inp and not dontsave:
        db.execute(
            "insert or replace into steam(chan, nick, user) "
            "values (?, ?, ?)", (chan, nick.lower(), inp))
        db.commit()

    return ret
def get_sales(mask):
    # Fetch data
    data = get_featuredcategories()
    flash_data = get_featured()

    # Break if either return empty; keep the same (sales, ok) tuple shape
    # as every other exit path
    if not data or not flash_data:
        return {}, False

    # Aggregate data
    fetchtime = int(time.time())
    data["flash"] = {}
    data["flash"]["name"] = "Flash Sales"
    data["flash"]["items"] = []
    data["featured"] = {}
    data["featured"]["name"] = "Featured Sales"
    data["featured"]["items"] = []
    for item in flash_data["large_capsules"]:
        if "discount_expiration" not in item.keys():
            item["discount_expiration"] = 9999999999
        if item["discount_expiration"] - fetchtime <= 43200:
            data["flash"]["items"].append(item)
        else:
            data["featured"]["items"].append(item)

    # Check for no data
    if sum([len(c_data.get('items', {}))
            for category, c_data in data.iteritems()
            if isinstance(c_data, dict)]) == 0:
        return {}, False

    # Mask Data
    data = {k: v for k, v in data.items() if isinstance(v, dict) and k not in mask}
    if debug:
        log_sales_data(data, "data")

    # Format data
    sales = {}
    for category in data:
        if "items" not in data[category].keys():
            data[category]["items"] = []
        for item in data[category]["items"]:
            # Prepare item data
            try:
                # Bundles
                if set(["id", "url"]).issubset(set(item.keys())):
                    if not item["final_price"] and not item["discounted"]:
                        item["final_price"] = web.try_googl(item["url"])
                        item["discounted"] = True
                else:
                    # Midweek Madness, etc
                    if "url" in item.keys() and "id" not in item.keys():
                        data[category]["name"] = item["name"] or data[category]["name"]
                        item["id"] = str(item["url"])[34:-1]
                    appdata = http.get_json(
                        "http://store.steampowered.com/api/"
                        "appdetails/?appids={}".format(item["id"]))[str(item["id"])]["data"]
                    item["name"] = appdata["name"]
                    if "Free to Play" in appdata["genres"]:
                        item["final_price"] = 'Free to Play'
                        item["discount_percent"] = '100'
                    else:
                        item["final_price"] = appdata["price_overview"]["final"]
                        item["discount_percent"] = appdata["price_overview"]["discount_percent"]
                    item["discounted"] = True if int(item["discount_percent"]) > 0 else False
            except:
                # Unusable category, e.g. banner announcements
                continue

            # Add appropriate item data to sales
            if item["discounted"]:
                item["name"] = item["name"].replace(" Advertising App", "")
                item = {k: u"{}".format(v) for k, v in item.items()
                        if k in ["name", "final_price", "discount_percent"]}
                if data[category]["name"] not in sales.keys():
                    sales[data[category]["name"]] = []
                sales[data[category]["name"]].append(item)

    # Filter and sort items
    sales = {category: sorted([item for item in items if item["name"] != "Uninitialized"],
                              key=lambda x: x["name"])
             for category, items in sales.items()}
    if debug:
        log_sales_data(sales, "sales")

    # Return usable data
    return sales, True
def twitter(inp, api_key=None):
    ".twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> -- " \
    "get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result"
    if not isinstance(api_key, dict) or any(
            key not in api_key
            for key in ("consumer", "consumer_secret", "access", "access_secret")):
        return "error: api keys not set"

    getting_id = False
    doing_search = False
    index_specified = False

    if re.match(r"^\d+$", inp):
        getting_id = True
        request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
    else:
        try:
            inp, index = re.split(r"\s+", inp, 1)
            index = int(index)
            index_specified = True
        except ValueError:
            index = 0
        if index < 0:
            index = 0
        if index >= 20:
            return "error: only supports up to the 20th tweet"

        if re.match(r"^#", inp):
            doing_search = True
            request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(inp)
        else:
            request_url = ("https://api.twitter.com/1.1/statuses/user_timeline.json"
                           "?screen_name=%s" % inp)

    try:
        tweet = http.get_json(request_url, oauth=True, oauth_keys=api_key,
                              tweet_mode="extended")
    except http.HTTPError as e:
        errors = {
            400: "bad request (ratelimited?)",
            401: "unauthorized",
            403: "forbidden",
            404: "invalid user/id",
            500: "twitter is broken",
            502: 'twitter is down ("getting upgraded")',
            503: "twitter is overloaded (lol, RoR)",
            410: "twitter shut off api v1.",
        }
        if e.code == 404:
            return "error: invalid " + ["username", "tweet id"][getting_id]
        if e.code in errors:
            return "error: " + errors[e.code]
        return "error: unknown %s" % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except KeyError:
            return "error: no results"

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return "error: not that many tweets found"

    if "retweeted_status" in tweet:
        rt = tweet["retweeted_status"]
        rt_text = http.unescape(rt["full_text"]).replace("\n", " ")
        text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text)
    else:
        text = http.unescape(tweet["full_text"]).replace("\n", " ")

    screen_name = tweet["user"]["screen_name"]
    time = tweet["created_at"]
    time = strftime("%Y-%m-%d %H:%M:%S", strptime(time, "%a %b %d %H:%M:%S +0000 %Y"))

    return "%s \x02%s\x02: %s" % (time, screen_name, text)
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [user] [dontsave] -- Displays the now playing (or last played)
    track of LastFM user [user]."""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # check if the user asked us not to save his details
    dontsave = inp.endswith(" dontsave")
    if dontsave:
        user = inp[:-9].strip().lower()
    else:
        user = inp

    db.execute("create table if not exists lastfm(nick primary key, acc)")

    if not user:
        user = db.execute("select acc from lastfm where nick=lower(?)",
                          (nick,)).fetchone()
        if not user:
            notice(lastfm.__doc__)
            return
        user = user[0]

    response = http.get_json(api_url, method="user.getrecenttracks",
                             api_key=api_key, user=user, limit=1)

    if 'error' in response:
        return u"Error: {}.".format(response["message"])

    if not "track" in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return u'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = 'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = 'last listened to'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = ' ({} ago)'.format(time_since)
    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]

    out = u'{} {} "{}"'.format(user, status, title)
    if artist:
        out += u" by \x02{}\x0f".format(artist)
    if album:
        out += u" from the album \x02{}\x0f".format(album)

    # append ending based on what type it was
    out += ending

    if inp and not dontsave:
        db.execute("insert or replace into lastfm(nick, acc) values (?,?)",
                   (nick.lower(), user))
        db.commit()

    return out
# Tail of the snopes fact-check formatter fmt(); `claim`, `content` and `permalink`
# are extracted from the article earlier in the function body.
    if claim == 'Claim':
        print("!!!", content)
    m = re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED|CORRECTLY ATTRIBUTED|(?<=Status:).*",
                  content)
    if m:
        status = m.group(0)
    else:
        status = '???'
    claim = re.sub(r"[\s\xa0]+", " ", http.unescape(claim)).strip()  # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", http.unescape(status)).title().strip()
    if len(claim) > 300:
        claim = claim[:300] + '...'
    return "Claim: {0} Status: {1} {2}".format(claim, status, permalink)


if __name__ == '__main__':
    a = http.get_json(SEARCH_URL, post_data=json.dumps(
        {"params": http.urlencode({"query": "people", "hitsPerPage": 1000})}).encode('utf8'))
    print(len(a['hits']))
    try:
        for x in a['hits']:
            if x['post_type'] == 'fact_check':
                f = fmt(x)
                print(f)
                assert len(f) < 400
    except AttributeError:
        print(x['permalink'])
        print(x['content'])
        raise
def weather(inp, chan='', nick='', reply=None, db=None, api_key=None):
    ".weather <location> [dontsave] | @<nick> -- gets weather data from " \
    "Wunderground http://wunderground.com/weather/api"
    if not api_key:
        return None

    # this database is used by other plugins interested in user's locations,
    # like .near in tag.py
    db.execute("create table if not exists "
               "location(chan, nick, loc, lat, lon, primary key(chan, nick))")

    if inp[0:1] == '@':
        nick = inp[1:].strip()
        loc = None
        dontsave = True
    else:
        dontsave = inp.endswith(" dontsave")
        # strip off the " dontsave" text if it exists and set it back to `inp`
        # so we don't report it back to the user incorrectly
        if dontsave:
            inp = inp[:-9].strip().lower()
        loc = inp

    if not loc:  # blank line
        loc = db.execute(
            "select loc from location where chan=? and nick=lower(?)",
            (chan, nick)).fetchone()
        if not loc:
            try:
                # grab from old-style weather database
                loc = db.execute("select loc from weather where nick=lower(?)",
                                 (nick,)).fetchone()
            except db.OperationalError:
                pass  # no such table
            if not loc:
                return weather.__doc__
        loc = loc[0]

    params = [http.quote(p.strip()) for p in loc.split(',')]
    loc = params[0]
    state = ''
    # Try to interpret the query based on the number of commas.
    # Two commas might be city-state, city-country, or a lat-long pair
    if len(params) == 2:
        state = params[1]
        # Check to see if a lat, long pair is being passed. This could be done
        # more completely with regex, and converting from DMS to decimal
        # degrees. This is nice and simple, however.
        try:
            float(loc)
            float(state)
            loc = loc + ',' + state
            state = ''
        except ValueError:
            state += '/'
    # Assume three commas is a city-state-country triplet. Discard the state
    # portion because that's what the API expects
    elif len(params) == 3:
        loc = params[0]
        state = params[2] + '/'

    url = 'http://api.wunderground.com/api/'
    query = '{key}/geolookup/conditions/forecast/q/{state}{loc}.json' \
        .format(key=api_key, state=state, loc=loc)
    url += query

    try:
        parsed_json = http.get_json(url)
    except IOError:
        return 'Could not get data from Wunderground'

    info = {}
    if 'current_observation' not in parsed_json:
        resp = 'Could not find weather for {inp}. '.format(inp=inp)
        # In the case of no observation, but results, print some possible
        # location matches
        if 'results' in parsed_json['response']:
            resp += 'Possible matches include: '
            results = parsed_json['response']['results']
            for place in results[:6]:
                resp += '{city}, '.format(**place)
                if place['state']:
                    resp += '{state}, '.format(**place)
                if place['country_name']:
                    resp += '{country_name}; '.format(**place)
            resp = resp[:-2]
        reply(resp)
        return

    obs = parsed_json['current_observation']
    sf = parsed_json['forecast']['simpleforecast']['forecastday'][0]
    info['city'] = obs['display_location']['full']
    info['t_f'] = obs['temp_f']
    info['t_c'] = obs['temp_c']
    info['weather'] = obs['weather']
    info['h_f'] = sf['high']['fahrenheit']
    info['h_c'] = sf['high']['celsius']
    info['l_f'] = sf['low']['fahrenheit']
    info['l_c'] = sf['low']['celsius']
    info['humid'] = obs['relative_humidity']
    info['wind'] = 'Wind: {mph}mph/{kph}kph'.format(mph=obs['wind_mph'],
                                                    kph=obs['wind_kph'])
    reply('{city}: {weather}, {t_f}F/{t_c}C'
          '(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)'
          ', Humidity: {humid}, {wind}'.format(**info))

    lat = float(obs['display_location']['latitude'])
    lon = float(obs['display_location']['longitude'])

    if inp and not dontsave:
        db.execute("insert or replace into "
                   "location(chan, nick, loc, lat, lon) "
                   "values (?, ?, ?, ?, ?)",
                   (chan, nick.lower(), inp, lat, lon))
        db.commit()
def genre(inp, nick='', db=None, bot=None, notice=None):
    """genre -- Displays information for specified genre from last.fm db."""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    genretag = inp

    response = http.get_json(api_url, method="tag.search", api_key=api_key,
                             tag=genretag, limit=1)
    if 'error' in response:
        return "Error: {}.".format(response["message"])

    tagdetails = response["results"]["tagmatches"]
    try:
        if "url" in tagdetails["tag"]:
            link = tagdetails["tag"]["url"]
            linkshort = web.isgd(link)
            tagname = response["results"]["opensearch:Query"]["searchTerms"]
            tagname = tagname.title()
        else:
            return "Error: No such genre, check spelling."
    except TypeError:
        return "Error: No description found of this genre."

    responsesimilar = http.get_json(api_url, method="tag.getsimilar",
                                    api_key=api_key, tag=genretag)
    tagsimilar = responsesimilar["similartags"]["tag"]
    simgenstr = str(tagsimilar)
    if "name" in simgenstr:
        # First genre
        simgen1 = simgenstr.split("u'name': u'", 1)[1]
        simgen2 = simgen1.split("'", 1)[0]
        # Second genre
        simgen3 = simgen1.split("u'name': u'", 1)[1]
        simgen4 = simgen3.split("'", 1)[0]
        # Third genre
        simgen5 = simgen3.split("u'name': u'", 1)[1]
        simgen6 = simgen5.split("'", 1)[0]
        similartag = '{}, {}, {}'.format(simgen2, simgen4, simgen6)
    else:
        return "Error: No such genre, check spelling."

    responsetop = http.get_json(api_url, method="tag.gettopartists",
                                api_key=api_key, tag=genretag)
    tagtopartist = responsetop["topartists"]["artist"]
    topartstr = str(tagtopartist)
    # First artist
    topart1 = topartstr.split("u'name': u'", 1)[1]
    topart2 = topart1.split("'", 1)[0]
    # Second artist
    topart3 = topart1.split("u'name': u'", 1)[1]
    topart4 = topart3.split("'", 1)[0]
    # Third artist
    topart5 = topart3.split("u'name': u'", 1)[1]
    topart6 = topart5.split("'", 1)[0]
    # Fourth artist
    topart7 = topart5.split("u'name': u'", 1)[1]
    topart8 = topart7.split("'", 1)[0]
    # Fifth artist
    topart9 = topart7.split("u'name': u'", 1)[1]
    topart10 = topart9.split("'", 1)[0]
    topartists = '{}, {}, {}, {}, {}'.format(topart2, topart4, topart6, topart8, topart10)

    responsedesc = http.get_json(api_url, method="tag.getInfo",
                                 api_key=api_key, tag=genretag)
    tagdesc = responsedesc["tag"]["wiki"]
    try:
        genredesc = tagdesc["summary"]
        genredesc = re.sub('<[^>]*>', '', genredesc)  # strip html tags
        genredesc = genredesc.replace("&quot;", "")
        genredesc = (genredesc[:225] + '...') if len(genredesc) > 225 else genredesc
    except TypeError:
        return "Error: No summary found for this genre, check spelling."

    out = ''
    if tagname:
        out += u'\x02{}\x0f: '.format(tagname)
    if genredesc:
        out += u'{}'.format(genredesc)
    if similartag:
        out += u' \x02Similar genres\x0f: ({})'.format(similartag)
    if topartists:
        out += u' \x02Top artists\x0f: ({})'.format(topartists)
    if linkshort:
        out += u' ({})'.format(linkshort)
    return out
def twitter(inp, say=None, api_key=None):
    """twitter <user|user n|id|#search|#search n> - Get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result."""
    if not isinstance(api_key, dict) or any(
            key not in api_key
            for key in ('consumer', 'consumer_secret', 'access', 'access_secret')):
        return "error: api keys not set"

    getting_id = False
    doing_search = False
    index_specified = False

    if re.match(r'^\d+$', inp):
        getting_id = True
        request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
        params = {'tweet_mode': 'extended'}  # full_text is used below
    else:
        try:
            inp, index = re.split(r'\s+', inp, 1)
            index = int(index.strip('-'))
            index_specified = True
        except ValueError:
            index = 0
        if index < 0:
            index = 0
        if index >= 20:
            return 'Error: only supports up to the 20th tweet'

        if re.match(r'^#', inp):
            doing_search = True
            request_url = "https://api.twitter.com/1.1/search/tweets.json"
            params = {'q': quote(inp)}
        else:
            request_url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
            params = {'screen_name': inp, 'exclude_replies': True,
                      'include_rts': False, 'tweet_mode': 'extended'}

    try:
        tweet = http.get_json(request_url, query_params=params, oauth=True,
                              oauth_keys=api_key)
    except http.HTTPError as e:
        errors = {400: 'bad request (ratelimited?)',
                  401: 'unauthorized (private)',
                  403: 'forbidden',
                  404: 'invalid user/id',
                  500: 'twitter is broken',
                  502: 'twitter is down ("getting upgraded")',
                  503: 'twitter is overloaded (lol, RoR)',
                  410: 'twitter shut off api v1.'}
        if e.code == 404:
            return 'Error: invalid ' + ['username', 'tweet id'][getting_id]
        if e.code in errors:
            return 'Error: ' + errors[e.code]
        return 'Error: unknown %s' % e.code

    if doing_search:
        try:
            tweet = tweet["statuses"]
            if not index_specified:
                index = random.randint(0, len(tweet) - 1)
        except:
            return 'Error: no results'

    if not getting_id:
        try:
            tweet = tweet[index]
        except IndexError:
            return 'Error: not that many tweets found'

    tweet['full_text'] = http.h.unescape(tweet['full_text'])
    lines = [t.strip() for t in tweet['full_text'].split('\n') if len(t.strip()) > 0]
    if 1 < len(lines) < 5:
        tweet['full_text'] = re.sub(r'(.*?)(https:\/\/t.co\/.*)', r'\1\n\2',
                                    tweet['full_text'])
        say(u'{} (@{}) on Twitter:'.format(
            tweet['user']['name'].encode('ascii', 'ignore'),
            tweet['user']['screen_name']))
        for line in [t.strip() for t in tweet['full_text'].split('\n') if len(t.strip()) > 0]:
            say(u' {}'.format(line))
    else:
        say(u'{} (@{}) on Twitter: "{}"'.format(
            tweet['user']['name'].encode('ascii', 'ignore'),
            tweet['user']['screen_name'],
            ' | '.join([t.strip() for t in tweet['full_text'].split('\n') if len(t.strip()) > 0])))
def card_search(query):
    base_url = "https://api.deckbrew.com"
    name = urllib.quote_plus(query)
    search_url = base_url + "/mtg/cards?name=" + name
    return http.get_json(search_url)
def nowplaying(inp, nick='', server='', reply=None, db=None, api_key=None):
    """.nowplaying/.np <user> -- returns the latest played (or currently playing) track for a
    specified Last.fm user, or for the current nick if no parameters are passed. This nick/user
    combination is saved, so afterwards the user only has to type .np to get the data."""
    try:
        username, extra = inp.split(' ')
        return ".nowplaying/.np <user> -- lists the currently playing or last played track for a Last.fm user"
    except ValueError:
        user = inp

    db.execute("create table if not exists nowplaying(nick primary key, user)")
    newuser = None
    if not user:
        user = db.execute("select user from nowplaying where nick=lower(?)",
                          (nick,)).fetchone()
        if not user:
            user = nick
            newuser = True
        else:
            user = user[0]

    tracks_json = http.get_json(api_url, method='user.getrecenttracks',
                                user=user, api_key=api_key)

    # if the user doesn't exist, the API responds with an error message
    try:
        return tracks_json['message']
    except KeyError:
        pass

    output = user + ' '

    try:
        track = tracks_json['recenttracks']['track'][0]
    except (KeyError, IndexError):
        return user + ' has never scrobbled a track.'

    try:
        if track['@attr']['nowplaying']:
            output += 'is now listening to: '
    except KeyError:
        output += 'last listened to: '

    artist = track['artist']['#text']
    track_name = track['name']
    output += artist + ' - ' + track_name + ' '
    if track['album']['#text']:
        output += '[' + track['album']['#text'] + '] '

    track_info = http.get_json(api_url, method='track.getinfo', track=track_name,
                               artist=artist, username=user, api_key=api_key)
    track_length = int(track_info['track']['duration']) // 1000

    # break the track length into hours, minutes, and seconds
    hours, rem = divmod(track_length, 3600)
    minutes, seconds = divmod(rem, 60)
    output += '('
    if hours:
        output += str(hours) + 'h '
    if minutes:
        output += str(minutes) + 'm '
    output += str(seconds) + 's)'

    loved = 0
    try:
        plays = track_info['track']['userplaycount']
        loved = track_info['track']['userloved']
    except KeyError:
        output += ' | First play'
        return output

    if plays == '1':
        plays_suffix = ' play'
    else:
        plays_suffix = ' plays'
    if loved == '1':
        plays_suffix += ' | <3'
    output += ' | ' + plays + plays_suffix

    reply(output)

    if inp or newuser:
        db.execute("insert or replace into nowplaying(nick, user) values (?,?)",
                   (nick.lower(), user))
        db.commit()
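# For reference, a trimmed sketch of the user.getrecenttracks JSON shape that
# nowplaying() navigates; the field names follow the last.fm API docs, but
# treat the exact layout as an assumption:
example_tracks_json = {
    "recenttracks": {
        "track": [{
            "name": "Example Track",
            "artist": {"#text": "Example Artist"},
            "album": {"#text": "Example Album"},
            "@attr": {"nowplaying": "true"},  # only present while scrobbling
        }]
    }
}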
def imgur(inp):
    """imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a
    link to them. If [subreddit] is undefined, returns any imgur images."""
    if inp:
        # see if the input ends with "nsfw"
        show_nsfw = inp.endswith(" nsfw")

        # remove "nsfw" from the input string after checking for it
        if show_nsfw:
            inp = inp[:-5].strip().lower()

        url = base_url.format(inp.strip())
    else:
        url = "http://www.reddit.com/domain/imgur.com/.json"
        show_nsfw = False

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)

    data = data["data"]["children"]
    random.shuffle(data)

    # filter the list to only have imgur links
    filtered_posts = [i["data"] for i in data if is_valid(i["data"])]

    if not filtered_posts:
        return "No images found."

    items = []

    headers = {"Authorization": "Client-ID b5d127e6941b07a"}

    # loop over the list of posts
    for post in filtered_posts:
        if post["over_18"] and not show_nsfw:
            continue

        match = imgur_re.search(post["url"])
        if match.group(1) == 'a/':
            # post is an album; add each image in it to the list
            url = album_api.format(match.group(2))
            images = http.get_json(url, headers=headers)["data"]
            for image in images:
                items.append(image["id"])
        elif match.group(2) is not None:
            # post is a single image
            items.append(match.group(2))

    if not items:
        return "No images found (use .imgur <subreddit> nsfw to show explicit content)"

    if show_nsfw:
        return "{} \x02NSFW\x02".format(web.isgd("http://imgur.com/" + ','.join(items)))
    else:
        return web.isgd("http://imgur.com/" + ','.join(items))
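# The module-level helpers referenced above (base_url, album_api, imgur_re,
# is_valid) are defined elsewhere in the plugin. A minimal sketch of what they
# plausibly look like; treat the exact regex and URLs as assumptions:
import re

base_url = "http://www.reddit.com/r/{}/.json"
album_api = "https://api.imgur.com/3/album/{}/images"
# group(1) captures 'a/' for albums, group(2) the imgur id
imgur_re = re.compile(r'http://(?:i\.)?imgur\.com/(a/)?(\w+\b(?!/))\.?\w?')

def is_valid(data):
    """Keep only posts that actually link to imgur."""
    return data["domain"] in ['i.imgur.com', 'imgur.com']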
def api_get(kind, query):
    url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
          'v=1.0&safe=off'
    return http.get_json(url % kind, q=query)
def api_get(kind, query):
    """Use the RESTful Google Search API"""
    url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
          'v=1.0&safe=moderate'
    return http.get_json(url % kind, q=query)
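# Usage sketch for either api_get variant above. The (long-deprecated) Google
# AJAX Search API returned results under responseData.results; the field
# names here are from memory and should be treated as assumptions:
parsed = api_get('web', 'example query')
results = parsed['responseData']['results']
if results:
    first = results[0]
    print('%s - %s' % (first['unescapedUrl'], first['titleNoFormatting']))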
def card_search(name):
    return http.get_json('https://api.deckbrew.com/mtg/cards', name=name)
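# Usage sketch for either card_search variant above. Deckbrew returned a JSON
# list of card objects; the 'name' and 'text' fields assumed here match the
# public API docs as remembered (the service has since shut down):
cards = card_search("Lightning Bolt")
if cards:
    print('%s -- %s' % (cards[0]['name'], cards[0].get('text', '')))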
def get_video_description(key, video_id):
    request = http.get_json(api_url, key=key, id=video_id)

    if request.get('error'):
        return

    data = request['items'][0]

    # collapse runs of whitespace in the title
    title = ' '.join(data['snippet']['title'].split())

    out = u'\x02{}\x02'.format(title)

    if 'contentDetails' not in data:
        return out

    # the duration comes back as an ISO 8601 string, e.g. 'PT1H2M10S'
    length = data['contentDetails']['duration']
    timelist = re.findall(r'(\d+[DHMS])', length)

    seconds = 0
    for t in timelist:
        t_field = int(t[:-1])
        if t[-1:] == 'D':
            seconds += 86400 * t_field
        elif t[-1:] == 'H':
            seconds += 3600 * t_field
        elif t[-1:] == 'M':
            seconds += 60 * t_field
        elif t[-1:] == 'S':
            seconds += t_field

    out += u' - length \x02{}\x02'.format(timeformat.format_time(seconds, simple=True))

    if 'statistics' not in data:
        return out

    stats = data['statistics']
    try:
        likes = plural(int(stats['likeCount']), "like")
        dislikes = plural(int(stats['dislikeCount']), "dislike")
        try:
            percent = 100 * float(stats['likeCount']) / (int(stats['likeCount'])
                                                         + int(stats['dislikeCount']))
        except ZeroDivisionError:
            percent = 0
    except KeyError:
        likes = 'likes disabled'
        dislikes = 'dislikes disabled'
        percent = 0

    out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes, dislikes, percent)

    views = int(stats['viewCount'])
    out += u' - \x02{:,}\x02 {}{}'.format(views, 'view', "s"[views == 1:])

    uploader = data['snippet']['channelTitle']
    upload_time = time.strptime(data['snippet']['publishedAt'],
                                "%Y-%m-%dT%H:%M:%S.000Z")
    out += u' - \x02{}\x02 on \x02{}\x02'.format(
        uploader, time.strftime("%Y.%m.%d", upload_time))

    if 'contentRating' not in data['contentDetails']:
        return out

    out += u' - \x034NSFW\x02'

    return out
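# The duration loop above parses ISO 8601 strings; a self-contained sketch of
# the same technique (the helper name is hypothetical). Like the plugin, it
# treats every 'M' as minutes, which is fine for YouTube durations but would
# misread a months designator in a general ISO 8601 period:
import re

def iso8601_to_seconds(duration):
    units = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
    return sum(int(t[:-1]) * units[t[-1]]
               for t in re.findall(r'(\d+[DHMS])', duration))

assert iso8601_to_seconds('PT1H2M10S') == 3730
assert iso8601_to_seconds('P1DT2H') == 93600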
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [username | @ nick] [save] -- Displays the now playing (or last played) track of LastFM user [username]."""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    save = False

    if '@' in inp:
        nick = inp.split('@')[1].strip()
        user = database.get(db, 'users', 'lastfm', 'nick', nick)
        if not user:
            return "No lastfm user stored for {}.".format(nick)
        # else:
        if inp.split(' ')[-1] == "url":
            return "[{}]: http://www.last.fm/user/{}".format(user, user)
    else:
        user = database.get(db, 'users', 'lastfm', 'nick', nick)
        if not inp:
            if not user:
                notice(lastfm.__doc__)
                return
        else:
            if inp.split(' ')[-1] == "url":
                return "[{}]: http://www.last.fm/user/{}".format(user, user)
            if not user:
                save = True
            if " save" in inp:
                save = True
            user = inp.split()[0]
            # if inp.split(' ')[-1] == "url": return "[{}]: http://www.last.fm/user/{}".format(user, user)

    response = http.get_json(api_url, method="user.getrecenttracks",
                             api_key=api_key, user=user, limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    if "track" not in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list;
        # the first item is the current track
        track = tracks[0]
        status = 'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and the
        # tracks entry is a dict representing the most recent track
        track = tracks
        status = 'last listened to'
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = u' ({} ago)'.format(time_since)
    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]

    out = u'{} {} "{}"'.format(user, status, title)
    if artist:
        out += u" by \x02{}\x0f".format(artist)
    if album:
        out += u" from the album \x02{}\x0f".format(album)
    out += ending

    if user and save:
        database.set(db, 'users', 'lastfm', user, 'nick', nick)

    return out

# ♫♫♫
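# database.get / database.set are CloudBot utility helpers, not shown in this
# excerpt. A minimal sketch of the assumed semantics, with the argument order
# inferred from the calls above (this is not the real implementation):
def get(db, table, col, key_col, key):
    """Return `col` from `table` for the row whose `key_col` matches `key`."""
    row = db.execute("select {} from {} where {} = lower(?)".format(col, table, key_col),
                     (key,)).fetchone()
    return row[0] if row else None

def set(db, table, col, value, key_col, key):
    """Upsert `value` into `col` for the row keyed by `key_col` = `key`."""
    db.execute("insert or replace into {} ({}, {}) values (lower(?), ?)".format(
        table, key_col, col), (key, value))
    db.commit()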
def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
    """weather <location> [dontsave] -- Gets weather data for <location> from Wunderground."""
    api_key = bot.config.get("api_keys", {}).get("wunderground")
    if not api_key:
        return "Error: No wunderground API details."

    # initialise weather DB
    db.execute("create table if not exists weather(nick primary key, loc)")

    # if there is no input, try getting the user's last location from the DB
    if not inp:
        location = db.execute("select loc from weather where nick=lower(:nick)",
                              {"nick": nick}).fetchone()
        if not location:
            # no location saved in the database, send the user help text
            notice(weather.__doc__)
            return
        loc = location[0]

        # no need to save a location, we already have it
        dontsave = True
    else:
        # see if the input ends with "dontsave"
        dontsave = inp.endswith(" dontsave")

        # remove "dontsave" from the input string after checking for it
        if dontsave:
            loc = inp[:-9].strip().lower()
        else:
            loc = inp

    location = http.quote_plus(loc)

    request_url = base_url.format(api_key, "geolookup/forecast/conditions", location)
    response = http.get_json(request_url)

    if 'location' not in response:
        try:
            location_id = response['response']['results'][0]['zmw']
        except KeyError:
            return "Could not get weather for that location."

        # get the weather again, using the closest match
        request_url = base_url.format(api_key, "geolookup/forecast/conditions",
                                      "zmw:" + location_id)
        response = http.get_json(request_url)

    if response['location']['state']:
        place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(
            response['location']['city'],
            response['location']['state'],
            response['location']['country'])
    else:
        place_name = "\x02{}\x02 (\x02{}\x02)".format(
            response['location']['city'],
            response['location']['country'])

    forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0]
    forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][1]

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place": place_name,
        "conditions": response['current_observation']['weather'],
        "temp_f": response['current_observation']['temp_f'],
        "temp_c": response['current_observation']['temp_c'],
        "humidity": response['current_observation']['relative_humidity'],
        "wind_kph": response['current_observation']['wind_kph'],
        "wind_mph": response['current_observation']['wind_mph'],
        "wind_direction": response['current_observation']['wind_dir'],
        "today_conditions": forecast_today['conditions'],
        "today_high_f": forecast_today['high']['fahrenheit'],
        "today_high_c": forecast_today['high']['celsius'],
        "today_low_f": forecast_today['low']['fahrenheit'],
        "today_low_c": forecast_today['low']['celsius'],
        "tomorrow_conditions": forecast_tomorrow['conditions'],
        "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c": forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c": forecast_tomorrow['low']['celsius'],
        "url": web.isgd(response["current_observation"]['forecast_url'] +
                        "?apiref=e535207ff4757b18")
    }

    reply("{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
          "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
          "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
          "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
          "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(**weather_data))

    if location and not dontsave:
        db.execute("insert or replace into weather(nick, loc) values (:nick, :loc)",
                   {"nick": nick.lower(), "loc": loc})
        db.commit()
import time
import random

from util import hook, http, web, text


## CONSTANTS

base_url = "http://api.bukget.org/3/"

search_url = base_url + "search/plugin_name/like/{}"
random_url = base_url + "plugins/bukkit/?start={}&size=1"
details_url = base_url + "plugins/bukkit/{}"

categories = http.get_json("http://api.bukget.org/3/categories")

count_total = sum([cat["count"] for cat in categories])
count_categories = {cat["name"].lower(): int(cat["count"]) for cat in categories}  # dict comps!


class BukgetError(Exception):
    def __init__(self, code, text):
        self.code = code
        self.text = text

    def __str__(self):
        return self.text


## DATA FUNCTIONS
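# Usage sketch for the constants and exception above, placed here for
# illustration ahead of the plugin's data functions; the category name is
# only an example, not necessarily one bukget served:
if 'admin tools' in count_categories:
    print('%d of %d plugins are admin tools' %
          (count_categories['admin tools'], count_total))

try:
    raise BukgetError(404, "no plugin found")
except BukgetError as e:
    print('%d: %s' % (e.code, e))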