def test_try_shorten(mock_requests):
    """try_shorten returns the service's short URL, and falls back to the
    original URL when the shortener responds with an error payload."""
    from cloudbot.util import web

    mock_requests.add(
        mock_requests.GET,
        'http://is.gd/create.php',
        json={'shorturl': 'https://is.gd/foobar'},
    )
    assert web.try_shorten('https://example.com', service='is.gd') == 'https://is.gd/foobar'

    mock_requests.replace(
        mock_requests.GET,
        'http://is.gd/create.php',
        json={'errormessage': 'Error occurred'},
    )
    assert web.try_shorten('https://example.com', service='is.gd') == 'https://example.com'
def get_youtube_info(video_id, api_key=None):
    """Fetch YouTube video data and return a formatted summary string.

    Falls back to just a shortened video link when the API reports an
    error or returns no items.
    """
    params = {
        "id": video_id,
        "key": api_key,
        "part": "snippet,contentDetails,statistics",
    }
    result = http.get_json(video_url, params=params)

    items = result.get('items')
    if result.get('error') or not items or len(items) < 1:
        return web.try_shorten(short_url + video_id)

    video = items[0]
    playtime = video['contentDetails']['duration'].strip('PT').lower()
    views = int(video['statistics']['viewCount'])
    return output_format.format(
        url=web.try_shorten(short_url + video_id),
        time=playtime,
        views=views,
        **video['snippet'],
    )
def displaybandinfo(text, nick, bot, notice):
    """[artist] - displays information about [artist]."""
    if not text:
        notice(getbandinfo.__doc__)
        # FIX: previously fell through and queried with an empty artist name
        return

    artist = getartistinfo(text, bot)
    if 'error' in artist:
        return 'No such artist.'

    a = artist['artist']
    summary = a["bio"]["summary"]
    tags = getartisttags(text, bot)

    # fall back to the raw URL if the shortener fails for any reason
    # (was a bare `except:` with a dead `pass` after the assignment)
    try:
        url = web.try_shorten(a["url"])
    except Exception:
        url = a["url"]

    out = "{}: ".format(a["name"])
    out += summary if summary else "No artist summary listed."
    out += " {}".format(url)
    out += " ({})".format(tags)
    return out
def get_info(url, show_url=True):
    """ Takes a SCPWiki URL and returns a formatted string """
    try:
        request = requests.get(url)
        request.raise_for_status()
    except (requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        raise SCPError("Error: Unable to fetch URL. ({})".format(e))

    contents = formatting.strip_html(request.text)

    try:
        item_id = re.findall("Item #: (.+?)\n", contents, re.S)[0]
        object_class = re.findall("Object Class: (.+?)\n", contents, re.S)[0]
        description = re.findall("Description: (.+?)\n", contents, re.S)[0]
    except IndexError:
        raise SCPError(
            "Error: Invalid or unreadable SCP. Does this SCP exist?")

    description = formatting.truncate(description, 130)
    short_url = web.try_shorten(url)

    # get the title from our pre-generated cache
    title = scp_cache[item_id][0] if item_id in scp_cache else "Unknown"

    if show_url:
        return "\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {}," \
               " \x02Description:\x02 {} - {}".format(title, item_id, object_class,
                                                      description, short_url)
    return "\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {}," \
           " \x02Description:\x02 {}".format(title, item_id, object_class,
                                             description)
def get_info(url, show_url=True):
    """ Takes a SCPWiki URL and returns a formatted string """
    try:
        request = requests.get(url)
        request.raise_for_status()
    except (requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        raise SCPError("Error: Unable to fetch URL. ({})".format(e))

    contents = formatting.strip_html(request.text)

    def _field(label):
        # first capture group after the label; IndexError means it's missing
        return re.findall(label + ": (.+?)\n", contents, re.S)[0]

    try:
        item_id = _field("Item #")
        object_class = _field("Object Class")
        description = _field("Description")
    except IndexError:
        raise SCPError("Error: Invalid or unreadable SCP. Does this SCP exist?")

    description = formatting.truncate(description, 130)
    short_url = web.try_shorten(url)

    # get the title from our pre-generated cache
    if item_id in scp_cache:
        title = scp_cache[item_id][0]
    else:
        title = "Unknown"

    if show_url:
        return "\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {}," \
               " \x02Description:\x02 {} - {}".format(title, item_id, object_class, description, short_url)
    return "\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {}," \
           " \x02Description:\x02 {}".format(title, item_id, object_class, description)
def format_playlist(playlist, show_url=True):
    """
    Takes a SoundCloud playlist item and returns a formatted string.
    """
    description = playlist['description']
    genre = playlist['genre']
    tracks = playlist['tracks']

    out = "\x02{}\x02".format(playlist['title'])
    if description:
        out += ': "{}"'.format(formatting.truncate(description))
    if genre:
        out += " - \x02{}\x02".format(genre)
    out += " - by \x02{}\x02".format(playlist['user']['username'])

    if tracks:
        out += " - {} items,".format(len(tracks))
        seconds = round(int(playlist['duration']) / 1000)
        out += " {}".format(timeformat.format_time(seconds, simple=True))
    else:
        out += " - No items"

    if show_url:
        out += " - {}".format(web.try_shorten(playlist['permalink_url']))
    return out
def rotten_tomatoes(text, reply):
    """<title> - gets ratings for <title> from Rotten Tomatoes"""
    api_key = bot.config.get_api_key("rottentomatoes")
    if not api_key:
        return "No Rotten Tomatoes API key set."

    title = text.strip()

    def _fetch(endpoint, request_params):
        # shared GET + HTTP-error reporting for both API endpoints
        resp = requests.get(endpoint, params=request_params)
        try:
            resp.raise_for_status()
        except HTTPError as e:
            reply("Error searching: {}".format(e.response.status_code))
            raise
        return resp

    search = _fetch(movie_search_url, {'q': title, 'apikey': api_key})
    if search.status_code != requests.codes.ok:
        return "Error searching: {}".format(search.status_code)

    results = search.json()
    if results['total'] == 0:
        return 'No results.'

    movie = results['movies'][0]
    title = movie['title']
    movie_id = movie['id']
    critics_score = movie['ratings']['critics_score']
    audience_score = movie['ratings']['audience_score']
    url = web.try_shorten(movie['links']['alternate'])

    if critics_score == -1:
        return "\x02{}\x02 - Critics Rating: \x02No Reviews\x02, " \
               "Audience Rating: \x02{}%\x02 - {}".format(title, audience_score, url)

    review_resp = _fetch(movie_reviews_url.format(movie_id),
                         {'review_type': 'all', 'apikey': api_key})
    if review_resp.status_code != requests.codes.ok:
        return "Error searching: {}".format(review_resp.status_code)

    reviews = review_resp.json()
    review_count = reviews['total']
    fresh = int(critics_score * review_count / 100)
    rotten = review_count - fresh

    return "\x02{}\x02 - Critics Rating: \x02{}%\x02 ({} liked, {} disliked), " \
           "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh,
                                                      rotten, audience_score, url)
def format_output(data):
    """ takes plugin data and returns two strings representing information about that plugin """
    name = data["plugin_name"]
    description = formatting.truncate(data['description'], 30)
    url = data['website']

    author_list = data['authors']
    if author_list:
        first = author_list[0]
        # zero-width space after the first char — presumably to stop the bot
        # from highlighting the author in-channel; confirm with callers
        authors = first[0] + "\u200b" + first[1:]
    else:
        authors = "Unknown"

    stage = data['stage']

    current_version = data['versions'][0]
    last_update = time.strftime('%d %B %Y %H:%M',
                                time.gmtime(current_version['date']))
    version_number = current_version['version']
    bukkit_versions = ", ".join(current_version['game_versions'])
    link = web.try_shorten(current_version['link'])

    if description:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) - {}".format(
            name, authors, description, stage, url)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) - {}".format(
            name, authors, stage, url)

    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} - {}".format(
        version_number, bukkit_versions, last_update, link)

    return line_a, line_b
def imdb(text):
    """<movie> [year] - Gets information about a movie from IMDb."""
    api_key = bot.config.get_api_key("omdb")
    if not api_key:
        return "This command requires an API key from omdb.com."

    # a trailing number is treated as the release year
    year = ""
    words = text.split()
    if words[-1].isdigit():
        text, year = ' '.join(words[:-1]), words[-1]

    try:
        content = http.get_json("http://www.omdbapi.com/", apikey=api_key,
                                t=text, y=year, plot='short', r='json')
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; keep the best-effort fallback but narrow it
        return "OMDB API error, please try again in a few minutes."

    if content['Response'] == 'False':
        return content['Error']
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content

        out = '\x02{Title}\x02 ({Year}) ({Genre}): {Plot}'
        if content['Runtime'] != 'N/A':
            out += ' \x02{Runtime}\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02{imdbRating}/10\x02 with \x02{imdbVotes}\x02 votes. '
        out += web.try_shorten('{URL}'.format(**content))
        return out.format(**content)
    else:
        return "Error parsing movie information."
def sptfy(text, sptfy=False):
    """Shorten a URL; when `sptfy` is set, use sptfy.com, otherwise web.try_shorten."""
    if not sptfy:
        return web.try_shorten(text)

    shortenurl = "http://sptfy.com/index.php"
    data = {
        "longUrl": text,
        "shortUrlDomain": 1,
        "submitted": 1,
        "shortUrlFolder": 6,
        "customUrl": "",
        "shortUrlPassword": "",
        "shortUrlExpiryDate": "",
        "shortUrlUses": 0,
        "shortUrlType": 0,
    }

    try:
        soup = http.get_soup(shortenurl, data=data, cookies=True)
    except Exception:
        # FIX: was a bare `except:`; on any fetch failure just return the
        # original URL (best-effort shortening)
        return text

    try:
        link = soup.find("div", {"class": "resultLink"}).text.strip()
        return link
    except Exception:
        # FIX: was a bare `except:`; typically AttributeError when the
        # result div is missing — report the site's error message instead
        message = "Unable to shorten URL: %s" % \
            soup.find("div", {"class": "messagebox_text"}).find("p").text.split("<br/>")[0]
        return message
def news(bot, chan, text=None):
    """Subscribe/unsubscribe a channel to Destiny news, or fetch the latest item."""
    global CACHE
    if text:
        channels = CACHE.get(DB_Q.channels.exists())
        if 'unsubscribe' in text.lower():
            if channels:
                if chan in channels['channels']:
                    channels['channels'].remove(chan)
                    CACHE.update(channels, eids=[channels.eid])
            return 'Successfully unsubscribed!'
        elif 'subscribe' in text.lower():
            if not channels:
                CACHE.insert({'channels': [chan]})
            elif chan not in channels['channels']:
                channels['channels'].append(chan)
                CACHE.update(channels, eids=[channels.eid])
            return 'Successfully subscribed!'
    # FIX: the query string had been mojibake'd — '&curren' was swallowed into
    # the HTML entity for '¤'; restore the intended '&currentpage=1' parameter
    feed = parse('https://www.bungie.net/en/Rss/NewsByCategory?category=destiny&currentpage=1&itemsPerPage=1')
    if not feed.entries:
        return 'Feed not found.'
    return '{} - {}'.format(
        feed['entries'][0]['summary'],
        try_shorten(feed['entries'][0]['link']))
def fact():
    """- gets a random fact from OMGFACTS"""
    attempts = 0
    # all of this retry machinery is because omgfacts is flaky
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            attempts += 1
            continue

        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        fact_data = ''.join(response.find(text=True))

        if fact_data:
            fact_data = fact_data.strip()
            break

        if attempts > 2:
            return "Could not find a fact!"
        attempts += 1

    url = web.try_shorten(link)
    return "{} - {}".format(fact_data, url)
def pronounce(text, event):
    """<word> - Returns instructions on how to pronounce <word> with an audio example."""
    lookup = PronounciationLookupRequest(text)
    try:
        pronunciations = list(lookup.get_filtered_results())[:5]
    except WordNotFound:
        return colors.parse(
            "Sorry, I don't know how to pronounce $(b){}$(b).").format(text)
    except WordnikAPIError as e:
        event.reply(e.user_msg())
        raise

    out = colors.parse("$(b){}$(b): ").format(text)
    out += " • ".join(entry['raw'] for entry in pronunciations)

    # best-effort: append a link to an audio sample when one exists
    audio_lookup = AudioLookupRequest(text)
    try:
        audio = audio_lookup.first()
    except WordNotFound:
        pass
    except WordnikAPIError as e:
        event.reply(e.user_msg())
        raise
    else:
        out += " - {}".format(web.try_shorten(audio['fileUrl']))

    return out
def format_playlist(playlist, show_url=True):
    """
    Takes a SoundCloud playlist item and returns a formatted string.
    """
    result = "\x02{}\x02".format(playlist['title'])

    if playlist['description']:
        result += ': "{}"'.format(formatting.truncate(playlist['description']))
    if playlist['genre']:
        result += " - \x02{}\x02".format(playlist['genre'])
    result += " - by \x02{}\x02".format(playlist['user']['username'])

    if not playlist['tracks']:
        result += " - No items"
    else:
        result += " - {} items,".format(len(playlist['tracks']))
        duration_secs = round(int(playlist['duration']) / 1000)
        result += " {}".format(timeformat.format_time(duration_secs, simple=True))

    if show_url:
        result += " - {}".format(web.try_shorten(playlist['permalink_url']))
    return result
def locate(text):
    """<location> - Finds <location> on Google Maps."""
    api_key = bot.config.get_api_key("google").get('access', None)
    if not api_key:
        return "This command requires a Google Developers Console API key."

    # Use the Geocoding API to get co-ordinates from the input
    params = {"address": text, "key": api_key}
    bias = bot.config.get('region_bias_cc', None)
    if bias:
        params['region'] = bias

    json = http.get_json(geocode_api, params=params)

    error = check_status(json['status'])
    if error:
        return error

    result = json['results'][0]
    location_name = result['formatted_address']
    formatted_location = "{lat},{lng},16z".format(**result['geometry']['location'])
    url = web.try_shorten("https://google.com/maps/@" + formatted_location + "/data=!3m1!1e3")

    tags = result['types']
    # if 'political' is not the only tag, remove it.
    if tags != ['political']:
        tags = [tag for tag in result['types'] if tag != 'political']
    tags = ", ".join(tags).replace("_", " ")

    return "\x02{}\x02 - {} ({})".format(location_name, url, tags)
def stock(text):
    """<symbol> - Looks up stock information"""
    api_key = bot.config.get_api_key("alphavantage")
    if not api_key:
        return "This command requires an Alpha Vantage API key."

    params = {'function': 'GLOBAL_QUOTE', 'apikey': api_key, 'symbol': text}
    quote = http.get_json(url, params=params)
    if not quote.get("Global Quote"):
        return "Unknown ticker symbol '{}'".format(text)

    # API field names look like "05. price"; keep only the last word as key
    quote = {k.split(' ')[-1]: tryParse(v)
             for k, v in quote['Global Quote'].items()}
    quote['url'] = web.try_shorten('https://finance.yahoo.com/quote/' + text)

    try:
        # color 5 (red) for a drop, 3 (green) otherwise
        quote['color'] = "5" if float(quote['change']) < 0 else "3"
        return "{symbol} - ${price:.2f} " \
               "\x03{color}{change:+.2f} ({percent:.2f}%)\x0F " \
               "H:${high:.2f} L:${low:.2f} O:${open:.2f} " \
               "Volume:{volume} - {url}".format(**quote)
    except (KeyError, ValueError, TypeError):
        # FIX: was a bare `except:`; malformed/missing fields are the
        # realistic failures when formatting the API payload
        return "Error parsing return data, please try again later."
def issue_cmd(text):
    """<username|repo> [number] - gets issue [number]'s summary, or the open issue count if no issue is specified"""
    args = text.split()
    owner, repo = parse_url(shortcuts.get(args[0], args[0]))
    issue = args[1] if len(args) > 1 else None

    if issue is None:
        # no issue number: report the open-issue count
        r = requests.get("https://api.github.com/repos/{}/{}/issues".format(
            owner, repo))
        r.raise_for_status()
        open_issues = r.json()
        if not open_issues:
            return "Repository has no open issues."
        return "Repository has {} open issues.".format(len(open_issues))

    r = requests.get("https://api.github.com/repos/{}/{}/issues/{}".format(
        owner, repo, issue))
    r.raise_for_status()
    j = r.json()

    url = web.try_shorten(j["html_url"], service="git.io")
    number = j["number"]
    title = j["title"]
    summary = formatting.truncate(j["body"].split("\n")[0], 25)
    if j["state"] == "open":
        state = "\x033\x02Opened\x02\x0f by {}".format(j["user"]["login"])
    else:
        state = "\x034\x02Closed\x02\x0f by {}".format(j["closed_by"]["login"])

    return "Issue #{} ({}): {} | {}: {}".format(number, state, url, title, summary)
def pronounce(text):
    """<word> -- Returns instructions on how to pronounce <word> with an audio example."""
    if not api_key:
        return "This command requires an API key from wordnik.com."

    word = sanitize(text)

    pron_url = API_URL + "word.json/{}/pronunciations".format(word)
    data = requests.get(pron_url, params={"api_key": api_key, "limit": 5}).json()
    if not data:
        return "Sorry, I don't know how to pronounce \x02{}\x02.".format(word)

    out = "\x02{}\x02: ".format(word)
    out += " • ".join(entry["raw"] for entry in data)

    # best-effort: append an audio sample link when one exists
    audio_url = API_URL + "word.json/{}/audio".format(word)
    data = requests.get(
        audio_url,
        params={"api_key": api_key, "limit": 1, "useCanonical": "false"},
    ).json()
    if data:
        out += " - {}".format(web.try_shorten(data[0]["fileUrl"]))

    # collapse any repeated whitespace
    return " ".join(out.split())
def chars(text, nick, bot, notice):
    """List a Destiny player's characters (light level // class // race), one
    line per console, each entry with a shortened gear-page link.

    Falls back to the caller's nick when no target text is given.
    """
    text = nick if not text else text
    text = text.split(' ')
    CONSOLE2ID = {"xbox": 1, "playstation": 2}
    err_msg = 'Invalid use of chars command. Use: !chars <nick> or !chars <gamertag> <psn/xbl>'
    target = compile_stats_arg_parse(text, nick, cached=False)
    # extra stat/split arguments are not valid for this command
    if target['stats'] or target['split']:
        return err_msg
    characterHash = target['user']
    # non-dict means lookup failed (presumably an error string — confirm
    # against compile_stats_arg_parse)
    if type(characterHash) is not dict:
        return 'A user by the name {} was not found.'.format(text[0])
    output = []
    for console in characterHash:
        # skip consoles other than the one explicitly requested, if any
        if target['console'] and CONSOLE2ID[target['console']] != console:
            print("{0} is not {1}".format(console, target['console']))
            continue
        console_output = []
        for char in characterHash[console]['characters']:
            console_output.append('✦{} // {} // {} - {}'.format(
                characterHash[console]['characters'][char]['LL'],
                characterHash[console]['characters'][char]['class'],
                characterHash[console]['characters'][char]['race'],
                try_shorten(
                    'https://www.bungie.net/en/Legend/Gear/{}/{}/{}'.format(
                        console, characterHash[console]['membershipId'], char))))
        # CONSOLES is 0-indexed while console ids start at 1
        output.append('{}: {}'.format(CONSOLES[console - 1],
                                      ' || '.join(console_output)))
    return "\x02{0}\x02: {1}".format(target['nick'], ' ; '.join(output))
def news(bot, chan, text=None):
    """Subscribe/unsubscribe a channel to Destiny news, or fetch the latest item."""
    global CACHE
    if text:
        channels = CACHE.get(DB_Q.channels.exists())
        if 'unsubscribe' in text.lower():
            if channels:
                if chan in channels['channels']:
                    channels['channels'].remove(chan)
                    CACHE.update(channels, eids=[channels.eid])
            return 'Successfully unsubscribed!'
        elif 'subscribe' in text.lower():
            if not channels:
                CACHE.insert({'channels': [chan]})
            elif chan not in channels['channels']:
                channels['channels'].append(chan)
                CACHE.update(channels, eids=[channels.eid])
            return 'Successfully subscribed!'
    # FIX: the query string had been mojibake'd — '&curren' was swallowed into
    # the HTML entity for '¤'; restore the intended '&currentpage=1' parameter
    feed = parse(
        'https://www.bungie.net/en/Rss/NewsByCategory?category=destiny&currentpage=1&itemsPerPage=1'
    )
    if not feed.entries:
        return 'Feed not found.'
    return '{} - {}'.format(feed['entries'][0]['summary'],
                            try_shorten(feed['entries'][0]['link']))
def rss(text, message):
    """rss <feed> -- Gets the first three items from the RSS feed <feed>."""
    limit = 3

    # preset news feeds
    strip = text.lower().strip()
    presets = {
        "bukkit": "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss",
        "xkcd": "http://xkcd.com/rss.xml",
        "ars": "http://feeds.arstechnica.com/arstechnica/index",
    }
    feed = presets.get(strip, text)
    if strip == "bukkit":
        limit = 1

    query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit"
    result = web.query(query, {"feed": feed, "limit": limit})

    if not result.rows:
        return "Could not find/read RSS feed."

    for row in result.rows:
        title = formatting.truncate_str(row["title"], 100)
        link = web.try_shorten(row["link"])
        message("{} - {}".format(title, link))
def pronounce(text):
    """<word> - Returns instructions on how to pronounce <word> with an audio example."""
    if not api_key:
        return "This command requires an API key from wordnik.com."

    word = sanitize(text)

    url = API_URL + "word.json/{}/pronunciations".format(word)
    response = requests.get(url, params={'api_key': api_key, 'limit': 5})
    json = response.json()
    if not json:
        return "Sorry, I don't know how to pronounce \x02{}\x02.".format(text)

    out = "\x02{}\x02: ".format(text)
    out += " • ".join(item['raw'] for item in json)

    # best-effort: append an audio sample link when one exists
    url = API_URL + "word.json/{}/audio".format(word)
    response = requests.get(
        url, params={'api_key': api_key, 'limit': 1, 'useCanonical': 'false'})
    json = response.json()
    if json:
        out += " - {}".format(web.try_shorten(json[0]['fileUrl']))

    # collapse any repeated whitespace
    return " ".join(out.split())
def validate(text):
    """validate <url> -- Runs url through the W3C Markup Validator."""
    text = text.strip()
    if not urllib.parse.urlparse(text).scheme:
        text = "http://" + text
    url = web.try_shorten(api_url + '?uri=' + text)

    request = requests.get(api_url, params={'uri': text, 'output': 'json'})
    if request.status_code != requests.codes.ok:
        return "Failed to fetch info: {}".format(request.status_code)

    warning_count = 0
    error_count = 0
    for mess in request.json()['messages']:
        if mess.get("subType", None) == "warning":
            warning_count += 1
        if mess.get("type", None) == "error":
            error_count += 1

    out_warning = "warnings" if warning_count > 1 else "warning"
    out_error = "errors" if error_count > 1 else "error"
    return "{} has {} {} and {} {} ({})".format(text, warning_count, out_warning,
                                                error_count, out_error, url)
def issue(text):
    """<username|repo> [number] - gets issue [number]'s summary, or the open issue count if no issue is specified"""
    args = text.split()
    repo = args[0] if args[0] not in shortcuts else shortcuts[args[0]]
    issue = args[1] if len(args) > 1 else None

    if issue:
        r = requests.get('https://api.github.com/repos/{}/issues/{}'.format(
            repo, issue))
        r.raise_for_status()
        j = r.json()

        url = web.try_shorten(j['html_url'], service='git.io')
        number = j['number']
        title = j['title']
        summary = formatting.truncate(j['body'].split('\n')[0], 25)
        if j['state'] == 'open':
            state = '\x033\x02Opened\x02\x0f by {}'.format(j['user']['login'])
        else:
            state = '\x034\x02Closed\x02\x0f by {}'.format(
                j['closed_by']['login'])
        return 'Issue #{} ({}): {} | {}: {}'.format(number, state, url, title,
                                                    summary)

    r = requests.get('https://api.github.com/repos/{}/issues'.format(repo))
    r.raise_for_status()
    j = r.json()
    count = len(j)
    # FIX: was `count is 0` — identity comparison with an int literal is an
    # implementation detail (CPython small-int caching); use equality
    if count == 0:
        return 'Repository has no open issues.'
    return 'Repository has {} open issues.'.format(count)
def issue(text):
    """ghissue <username|repo> [number] - gets issue [number]'s summary from GitHub, or the open issue count if no issue is specified."""
    args = text.split()
    repo = args[0] if args[0] not in shortcuts else shortcuts[args[0]]
    issue = args[1] if len(args) > 1 else None

    if issue:
        r = requests.get('https://api.github.com/repos/{}/issues/{}'.format(repo, issue))
        # FIX: surface HTTP errors instead of failing later with a KeyError
        r.raise_for_status()
        j = r.json()

        url = web.try_shorten(j['html_url'], service='git.io')
        number = j['number']
        title = j['title']
        summary = formatting.truncate(j['body'].split('\n')[0], 25)
        if j['state'] == 'open':
            state = '\x033\x02Opened\x02\x0f by {}'.format(j['user']['login'])
        else:
            state = '\x034\x02Closed\x02\x0f by {}'.format(j['closed_by']['login'])
        return 'Issue #{} ({}): {} | {}: {}'.format(number, state, url, title, summary)

    r = requests.get('https://api.github.com/repos/{}/issues'.format(repo))
    r.raise_for_status()
    j = r.json()
    count = len(j)
    # FIX: was `count is 0` — identity comparison with an int literal relies
    # on CPython small-int caching; use equality
    if count == 0:
        return 'Repository has no open issues.'
    return 'Repository has {} open issues.'.format(count)
def format_output(data):
    """ takes plugin data and returns two strings representing information about that plugin """
    name = data["plugin_name"]
    description = formatting.truncate_str(data['description'], 30)
    url = data['website']

    if data['authors']:
        author = data['authors'][0]
        # zero-width space after the first char — presumably to stop the bot
        # from highlighting the author in-channel; confirm with callers
        authors = author[0] + "\u200b" + author[1:]
    else:
        authors = "Unknown"

    stage = data['stage']

    latest = data['versions'][0]
    last_update = time.strftime('%d %B %Y %H:%M', time.gmtime(latest['date']))
    version_number = latest['version']
    bukkit_versions = ", ".join(latest['game_versions'])
    link = web.try_shorten(latest['link'])

    if description:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(
            name, authors, description, stage, url)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(
            name, authors, stage, url)
    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(
        version_number, bukkit_versions, last_update, link)
    return line_a, line_b
def collection(text, nick, bot):
    """Report a Destiny player's collection progress (grimoire score, ghosts,
    lore fragments) per console, with a destinystatus.com grimoire link.

    Accepts an optional trailing platform token (xbl/psn variants); otherwise
    looks up the requester's own nick.
    """
    if text:
        # trailing token selects the platform: 1 = Xbox, 2 = PlayStation
        if text.split(' ').pop().lower() in ['xb1', 'xb', 'xbl', 'xbox']:
            membership = get_user(' '.join(text.split(' ')[0:len(text.split(' ')) - 1]), 1)
            links = {1: membership[1]['displayName']}
        elif text.split(' ').pop().lower() in ['psn', 'ps', 'playstation', 'ps4']:
            membership = get_user(' '.join(text.split(' ')[0:len(text.split(' ')) - 1]), 2)
            links = {2: membership[2]['displayName']}
        else:
            membership = get_user(text)
            # a plain string return means the lookup failed
            if type(membership) == str:
                return 'A user by the name of {} was not found. Try specifying platform: psn or xbl'.format(text)
            links = CACHE['links'].get(text)
    else:
        membership = get_user(nick)
        links = CACHE['links'].get(nick)
    if type(membership) == str:
        return membership
    output = []
    for console in membership:
        grimoire = get(
            '{}Vanguard/Grimoire/{}/{}/'
            .format(BASE_URL, console, membership[console]['membershipId']),
            headers=HEADERS
        ).json()['Response']['data']
        found_frags = []
        ghosts = 0
        for card in grimoire['cardCollection']:
            if 'fragments' not in CACHE['collections']:
                # XXX: don't allow !collections to be broken
                # because of bad cache
                prepare_lore_cache()
            if card['cardId'] in CACHE['collections']['fragments']:
                found_frags.append([card['cardId']])
            elif card['cardId'] == 103094:
                # card 103094 appears to be the dead-ghosts counter card —
                # TODO confirm against the Bungie grimoire data
                ghosts = card['statisticCollection'][0]['displayValue']
        # cap the displayed ghost count at 99
        if int(ghosts) >= 99:
            ghosts = 99
        if console == 1:
            platform = "xbl"
        else:
            platform = "psn"
        output.append('{}: Grimoire {}/{}, Ghosts {}/{}, Fragments {}/{} - {}'.format(
            CONSOLES[console - 1], grimoire['score'],
            CACHE['collections']['grim_tally'],
            ghosts, CACHE['collections']['ghost_tally'],
            len(found_frags),
            len(CACHE['collections']['fragments']),
            try_shorten('http://destinystatus.com/{}/{}/grimoire'.format(
                platform, links[console]
            ))
        ))
    return output
def get_info(url):
    """Return (shortened_url, title) for a link; title is None for media
    links or when fetching/parsing the title fails."""
    if not url.startswith('//') and '://' not in url:
        url = 'http://' + url
    try:
        mime, encoding = mimetypes.guess_type(url)
        is_media = mime and any(
            mime.startswith(kind) for kind in ['video', 'audio', 'image'])
        if is_media:
            # media links get no title
            return web.try_shorten(url), None
        title = http.get_title(url)
        # collapse whitespace/newlines and trim decorative pipes
        title = u' '.join(re.sub(u'\r|\n', u' ', title).split()).strip('| ')
        return web.try_shorten(url), title or None
    except Exception as e:
        # deliberate best-effort: any failure falls back to a bare short link
        print(e)
        return web.try_shorten(url), None
def news(bot):
    """Return the latest Destiny news item from Bungie's RSS feed."""
    # FIX: the query string had been mojibake'd — '&curren' was swallowed into
    # the HTML entity for '¤'; restore the intended '&currentpage=1' parameter
    feed = parse('https://www.bungie.net/en/Rss/NewsByCategory?category=destiny&currentpage=1&itemsPerPage=1')
    if not feed.entries:
        return 'Feed not found.'
    return '{} - {}'.format(
        feed['entries'][0]['summary'],
        try_shorten(feed['entries'][0]['link']))
def format_game(app_id, show_url=True):
    """
    Takes a Steam Store app ID and returns a formatted string with data about that app ID
    :type app_id: string
    :return: string
    """
    try:
        data = http.get_json(API_URL, params={'appids': app_id}, timeout=15)
    except Exception as e:
        return f"Could not get game info: {e}"

    game = data[app_id]["data"]

    # basic info
    out = ["\x02{}\x02".format(game["name"])]
    desc = " ".join(formatting.strip_html(game["about_the_game"]).split())
    out.append(formatting.truncate(desc, 75))

    # genres
    try:
        genres = ", ".join(g['description'] for g in game["genres"])
        out.append("\x02{}\x02".format(genres))
    except KeyError:
        # some things have no genre
        pass

    # release date
    release = game['release_date']
    if release['coming_soon']:
        out.append("coming \x02{}\x02".format(release['date']))
    else:
        out.append("released \x02{}\x02".format(release['date']))

    # pricing
    price = game.get("price_overview")
    if game['is_free']:
        out.append("\x02free\x02")
    elif price:
        # the steam API sends prices as an int like "9999" for $19.99, we divmod to get the actual price
        if price['final'] == price['initial']:
            out.append("\x02$%d.%02d\x02" % divmod(price['final'], 100))
        else:
            price_now = "$%d.%02d" % divmod(price['final'], 100)
            price_original = "$%d.%02d" % divmod(price['initial'], 100)
            out.append("\x02{}\x02 (was \x02{}\x02)".format(price_now, price_original))
    # else: game has no pricing, it's probably not released yet

    if show_url:
        out.append(web.try_shorten(STORE_URL.format(game['steam_appid'])))

    return " - ".join(out)
def test_try_shorten(mock_requests):
    """try_shorten yields the short URL on success and the input on error."""
    from cloudbot.util import web

    long_url = "https://example.com"

    mock_requests.add(mock_requests.GET, "http://is.gd/create.php",
                      json={"shorturl": "https://is.gd/foobar"})
    assert web.try_shorten(long_url, service="is.gd") == "https://is.gd/foobar"

    mock_requests.replace(mock_requests.GET, "http://is.gd/create.php",
                          json={"errormessage": "Error occurred"})
    assert web.try_shorten(long_url, service="is.gd") == "https://example.com"
def meh():
    """- List the current meh.com deal."""
    feed = feedparser.parse("https://meh.com/deals.rss")
    entry = feed.entries[0]
    return "meh.com: {} ({})".format(entry.title, web.try_shorten(entry.link))
def sonhos(text):
    """sonhos <phrase> [id] -- Looks up <phrase> on www.sonhos.com.br."""
    if not text:
        return

    # clean and split the input
    text = text.lower().strip()
    parts = text.split()

    # a trailing number selects which definition to show (1-based)
    if parts[-1].isdigit():
        id_num = int(parts[-1])
        del parts[-1]
    else:
        id_num = 1

    text = "-".join(parts)

    # fetch the definitions
    url = "http://www.sonhos.com.br/sonhar-com-" + unidecode.unidecode(text)
    try:
        request = requests.get(url)
        request.raise_for_status()
    except (requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        return "Could not get definition: {}".format(e)

    page = request.text
    definitions = re.findall("<span class=\"text-post\">(.*?)</span>", page, re.DOTALL)
    if not definitions:
        return 'Não achei nada com o termo \x02' + text + '\x02.'

    try:
        definition = definitions[id_num - 1]
    except IndexError:
        return 'Não encontrado.'

    # the site uses several heading formats; try each until one matches
    def_found = re.findall("Significado Sonhar com (.+?)<p>", definition, re.DOTALL)
    if len(def_found) == 0:
        def_found = re.findall("Significado de sonhar com (.+?)\.", definition, re.DOTALL)
    if len(def_found) == 0:
        def_found = re.findall("<p><p><strong>Sonhar com (.+?)</strong>", definition, re.DOTALL)
    if len(def_found) == 0:
        def_found = text.capitalize().strip()
    else:
        def_found = def_found[0].capitalize().strip()

    def_text = sanitize(definition)
    short_url = web.try_shorten(url)
    return "[{}/{}] \x02{}\x02: {} - {}".format(id_num, len(definitions),
                                                def_found, def_text, short_url)
def forecast(reply, db, event):
    """<location> - Gets forecast data for <location>."""
    res, err = check_and_parse(event, db)
    if not res:
        return err

    location_data, fio = res
    daily_conditions = fio.get_daily()['data']
    # first five forecast days: today, tomorrow, and the following three
    # (three_days is captured but only today/tomorrow appear in the output)
    today, tomorrow, *three_days = daily_conditions[:5]
    today['name'] = 'Today'
    tomorrow['name'] = 'Tomorrow'

    # only today/tomorrow get wind + summary fields — these feed day_str below
    for day_fc in (today, tomorrow):
        wind_speed = day_fc['windSpeed']
        day_fc.update(
            wind_direction=bearing_to_card(day_fc['windBearing']),
            wind_speed_mph=wind_speed,
            wind_speed_kph=mph_to_kph(wind_speed),
            summary=day_fc['summary'].rstrip('.'),
        )

    # every day gets F/C high/low conversions
    for fc_data in (today, tomorrow, *three_days):
        high = fc_data['temperatureHigh']
        low = fc_data['temperatureLow']
        fc_data.update(
            temp_high_f=round_temp(high),
            temp_high_c=round_temp(convert_f2c(high)),
            temp_low_f=round_temp(low),
            temp_low_c=round_temp(convert_f2c(low)),
        )

    # (label, format-fragment) pairs rendered per day
    parts = [
        ('High', "{temp_high_f:.0f}F/{temp_high_c:.0f}C"),
        ('Low', "{temp_low_f:.0f}F/{temp_low_c:.0f}C"),
        ('Humidity', "{humidity:.0%}"),
        ('Wind',
         "{wind_speed_mph:.0f}MPH/{wind_speed_kph:.0f}KPH {wind_direction}"),
    ]

    day_str = colors.parse("$(b){name}$(b): {summary}; ") + '; '.join(
        '{}: {}'.format(part[0], part[1]) for part in parts)

    # link to the full forecast for these coordinates
    url = web.try_shorten(
        'https://darksky.net/forecast/{lat:.3f},{lng:.3f}'.format_map(
            location_data))

    out_format = "{today_str} | {tomorrow_str} -- {place} - $(ul){url}$(clear)"

    reply(
        colors.parse(out_format).format(
            today_str=day_str.format_map(today),
            tomorrow_str=day_str.format_map(tomorrow),
            place=location_data['address'],
            url=url))
def weather(reply, db, triggered_prefix, event):
    """<location> - Gets weather data for <location>."""
    res, err = check_and_parse(event, db)
    if not res:
        return err

    location_data, fio = res
    daily_conditions = fio.get_daily()['data']
    current = fio.get_currently()
    today = daily_conditions[0]
    wind_speed = current['windSpeed']
    today_high = today['temperatureHigh']
    today_low = today['temperatureLow']
    # enrich the 'current' dict in place with the derived fields the
    # format fragments below expect
    current.update(
        name='Current',
        wind_direction=bearing_to_card(current['windBearing']),
        wind_speed_mph=wind_speed,
        wind_speed_kph=mph_to_kph(wind_speed),
        summary=current['summary'].rstrip('.'),
        temp_f=round_temp(current['temperature']),
        temp_c=round_temp(convert_f2c(current['temperature'])),
        temp_high_f=round_temp(today_high),
        temp_high_c=round_temp(convert_f2c(today_high)),
        temp_low_f=round_temp(today_low),
        temp_low_c=round_temp(convert_f2c(today_low)),
    )

    # (label, format-fragment) pairs rendered into the reply
    parts = [
        ('Current', "{summary}, {temp_f}F/{temp_c}C"),
        ('High', "{temp_high_f}F/{temp_high_c}C"),
        ('Low', "{temp_low_f}F/{temp_low_c}C"),
        ('Humidity', "{humidity:.0%}"),
        ('Wind',
         "{wind_speed_mph:.0f}MPH/{wind_speed_kph:.0f}KPH {wind_direction}"),
    ]

    current_str = '; '.join(
        colors.parse('$(b){}$(b): {}$(clear)'.format(part[0], part[1]))
        for part in parts)

    # link to the full forecast for these coordinates
    url = web.try_shorten(
        'https://darksky.net/forecast/{lat:.3f},{lng:.3f}'.format_map(
            location_data))

    reply(
        colors.parse("{current_str} -- "
                     "{place} - "
                     "$(ul){url}$(clear) "
                     "($(i)To get a forecast, use {cmd_prefix}fc$(i))").format(
                         place=location_data['address'],
                         current_str=current_str.format_map(current),
                         url=url,
                         cmd_prefix=triggered_prefix,
                     ))
def imgur(text):
    """[search term] / [/r/subreddit] / [/user/username] / memes / random - returns a link to a random imgur image based on your input. if no input is given the bot will get an image from the imgur frontpage """
    text = text.strip().lower()

    if not imgur_api:
        return "No imgur API details"

    if text == "apicredits":
        return imgur_api.credits

    items, is_reddit = get_items(text)

    if not items:
        return "No results found."

    # if the item has no title, we don't want it. ugh >_>
    items = [item for item in items if item.title]

    # BUG FIX: the title filter above can empty the list, which made
    # random.choice() raise IndexError. Bail out gracefully instead.
    if not items:
        return "No results found."

    # random.choice() already picks uniformly; the former shuffle-then-choose
    # was redundant.
    item = random.choice(items)

    tags = []

    # remove unslightly full stops
    if item.title.endswith("."):
        title = item.title[:-1]
    else:
        title = item.title

    # if it's an imgur meme, add the meme name
    # if not, AttributeError will trigger and code will carry on
    with suppress(AttributeError):
        title = "\x02{}\x02 - {}".format(
            item.meme_metadata["meme_name"].lower(), title)

    # if the item has a tag, show that
    if item.section:
        tags.append(item.section)

    # if the item is nsfw, show that
    if item.nsfw:
        tags.append("nsfw")

    # if the search was a subreddit search, add the reddit comment link
    if is_reddit:
        reddit_url = web.try_shorten("http://reddit.com" + item.reddit_comments)
        url = "{} ({})".format(item.link, reddit_url)
    else:
        url = "{}".format(item.link)

    tag_str = "[\x02" + ("\x02, \x02".join(tags)) + "\x02] " if tags else ""

    return '{}"{}" - {}'.format(tag_str, title, url)
def slickdeals():
    """- List the top 3 frontpage slickdeals.net deals."""
    feed_url = ("https://slickdeals.net/newsearch.php?"
                "mode=frontpage&searcharea=deals&searchin=first&rss=1")
    parsed = feedparser.parse(feed_url)

    # Render each of the first three entries as "title (short link)".
    deals = []
    for entry in parsed.entries[:3]:
        deals.append("{} ({})".format(entry.title, web.try_shorten(entry.link)))

    # Bullet-separated summary line.
    return "slickdeals.net: " + ' \u2022 '.join(deals)
def twitter_url(match, message):
    # URL hook: expand a linked tweet into "<short url> - <author>" plus the
    # tweet text (multi-line tweets are emitted one message per line).
    try:
        api_key = bot.config.get_api_key("twitter")  # NOTE(review): `bot` looks module-level here -- confirm
        request_url = 'https://api.twitter.com/1.1/statuses/show.json'
        params = {'id': match.group(2), 'tweet_mode': 'extended'}
        tweet = http.get_json(request_url, params=params, oauth=True, oauth_keys=api_key)
        tweet['full_text'] = http.h.unescape(tweet['full_text'])
        if tweet['full_text'].count('\n') > 0:
            # Force the trailing t.co link onto its own line.
            tweet['full_text'] = re.sub(r'(.*?)(https:\/\/t.co\/.*)', r'\1\n\2', tweet['full_text'])
            message(u'{} - {} (@{}) on Twitter:'.format(web.try_shorten(match.group(0)),
                                                        tweet['user']['name'],
                                                        tweet['user']['screen_name']))
            for line in tweet['full_text'].split('\n'):
                if len(line.strip()) > 0:
                    message(u' {}'.format(line))
        else:
            message(u'{} - {} (@{}) on Twitter: "{}"'.format(web.try_shorten(match.group(0)),
                                                             tweet['user']['name'],
                                                             tweet['user']['screen_name'],
                                                             tweet['full_text'].replace('\n', ' | ')))
    except Exception:
        # BUG FIX: was a bare `except:`. Keep the best-effort fallback line,
        # but stop swallowing SystemExit/KeyboardInterrupt.
        message("{} - Twitter".format(web.try_shorten(match.group(0))))
def format_data(app_id, show_url=True):
    """Return a one-line formatted summary for a Steam app.

    :param app_id: Steam application id (string)
    :param show_url: append a shortened store link when True
    :return: formatted description string, or an error message
    """
    try:
        response = requests.get(API_URL, params={'appids': app_id})
        response.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get game info: {}".format(e)

    game = response.json()[app_id]["data"]

    # name + short description
    segments = ["\x02{}\x02".format(game["name"])]
    segments.append(formatting.truncate_str(formatting.strip_html(game["about_the_game"]), 70))

    # genres
    segments.append("\x02{}\x02".format(", ".join(g['description'] for g in game["genres"])))

    # release date
    release = game['release_date']
    if release['coming_soon']:
        segments.append("coming \x02{}\x02".format(release['date']))
    else:
        segments.append("released \x02{}\x02".format(release['date']))

    # pricing (Steam reports prices in cents)
    if game['is_free']:
        segments.append("\x02free\x02")
    else:
        price = game['price_overview']
        final = "$%d.%02d" % divmod(price['final'], 100)
        if price['final'] == price['initial']:
            segments.append("\x02{}\x02".format(final))
        else:
            initial = "$%d.%02d" % divmod(price['initial'], 100)
            segments.append("\x02{}\x02 (was \x02{}\x02)".format(final, initial))

    if show_url:
        segments.append(web.try_shorten(STORE_URL.format(game['steam_appid'])))

    return " - ".join(segments)
def wolframalpha(text, bot, reply):
    """<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: missing api key"

    response = requests.get(api_url, params={'input': text, 'appid': api_key})

    try:
        response.raise_for_status()
    except HTTPError as e:
        reply("Error getting query: {}".format(e.response.status_code))
        raise

    if response.status_code != requests.codes.ok:
        return "Error getting query: {}".format(response.status_code)

    result = etree.fromstring(response.content, parser=parser)

    # browser-viewable version of this query
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        if pod.attrib['id'] == 'Input':
            continue

        values = []
        for raw in pod.xpath('subpod/plaintext/text()'):
            cleaned = re.sub(r'\s+', ' ', raw.strip().replace('\\n', '; '))
            if cleaned:
                values.append(cleaned)

        if values:
            pod_texts.append(pod.attrib['title'] + ': ' + ', '.join(values))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)

    # I have no idea what this regex does.
    ret = re.sub(r'\\(.)', r'\1', ret)
    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
def answer(text):
    """answer <query> -- find the answer to a question on Yahoo! Answers"""
    yql = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(yql, {"query": text.strip()}).one()

    # collapse newlines/extra whitespace in the answer, then trim to 80 chars
    answer_text = formatting.truncate_str(" ".join(result["ChosenAnswer"].split()), 80)
    link = web.try_shorten(result["Link"])

    return '\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, link)
def imgur(text):
    """[search term] / [/r/subreddit] / [/user/username] / memes / random - returns a link to a random imgur image based on your input. if no input is given the bot will get an image from the imgur frontpage """
    text = text.strip().lower()

    if not imgur_api:
        return "No imgur API details"

    if text == "apicredits":
        return imgur_api.credits

    items, is_reddit = get_items(text)

    if not items:
        return "No results found."

    # if the item has no title, we don't want it. ugh >_>
    items = [item for item in items if item.title]

    # BUG FIX: the title filter above can empty the list, which made
    # random.choice() raise IndexError. Bail out gracefully instead.
    if not items:
        return "No results found."

    # random.choice() already picks uniformly; the former shuffle-then-choose
    # was redundant.
    item = random.choice(items)

    tags = []

    # remove unslightly full stops
    if item.title.endswith("."):
        title = item.title[:-1]
    else:
        title = item.title

    # if it's an imgur meme, add the meme name
    # if not, AttributeError will trigger and code will carry on
    with suppress(AttributeError):
        title = "\x02{}\x02 - {}".format(item.meme_metadata["meme_name"].lower(), title)

    # if the item has a tag, show that
    if item.section:
        tags.append(item.section)

    # if the item is nsfw, show that
    if item.nsfw:
        tags.append("nsfw")

    # if the search was a subreddit search, add the reddit comment link
    if is_reddit:
        reddit_url = web.try_shorten("http://reddit.com" + item.reddit_comments)
        url = "{} ({})".format(item.link, reddit_url)
    else:
        url = "{}".format(item.link)

    tag_str = "[\x02" + ("\x02, \x02".join(tags)) + "\x02] " if tags else ""

    return '{}"{}" - {}'.format(tag_str, title, url)
def qrcode(text):
    """<link> - returns a link to a QR code image for <link>"""
    # Google Charts QR parameters: chart type, dimensions, payload.
    query = urllib.parse.urlencode({
        "cht": "qr",
        "chs": "200x200",
        "chl": text,
    })
    return web.try_shorten("http://chart.googleapis.com/chart?{}".format(query))
def format_group(group, show_url=True):
    """Build the display string for a SoundCloud group dict."""
    pieces = ["\x02{}\x02".format(group['name'])]

    if group['description']:
        pieces.append(': "{}"'.format(formatting.truncate(group['description'])))

    pieces.append(" - Owned by \x02{}\x02.".format(group['creator']['username']))

    if show_url:
        pieces.append(" - {}".format(web.try_shorten(group['permalink_url'])))

    return "".join(pieces)
def collection(text, nick, bot):
    """[nick] - Report Grimoire score, ghost and fragment counts for a linked Destiny user."""
    global LINKS
    text = nick if not text else text
    membership = get_user(text)
    links = LINKS.get(DB_Q.nick == text)

    # get_user() returns an error-message string on failure.
    # BUG FIX: use isinstance() instead of `type(membership) == str`.
    if isinstance(membership, str):
        return membership

    output = []
    for console in membership:
        grimoire = get(
            '{}Vanguard/Grimoire/{}/{}/'
            .format(BASE_URL, console, membership[console]['membershipId']),
            headers=HEADERS
        ).json()['Response']['data']

        found_frags = []
        ghosts = 0

        fragments = COLLECTIONS.get(DB_Q.fragments.exists())['fragments']
        if not fragments:
            # Lazily (re)build the lore cache on first use.
            prepare_lore_cache()
            fragments = COLLECTIONS.get(DB_Q.fragments.exists())['fragments']

        for card in grimoire['cardCollection']:
            if card['cardId'] in fragments:
                # FIX: append the id itself; the old code appended a
                # one-element list, which only worked because len() is all
                # that is used below.
                found_frags.append(card['cardId'])
            elif card['cardId'] == 103094:
                # Card 103094 presumably tracks "ghosts found" -- the stat is
                # capped at 99 for display. TODO confirm against Bungie docs.
                ghosts = card['statisticCollection'][0]['displayValue']
                if int(ghosts) >= 99:
                    ghosts = 99

        platform = "xbl" if console == 1 else "psn"

        output.append('{}: Grimoire {}/{}, Ghosts {}/{}, Fragments {}/{} - {}'.format(
            CONSOLES[console - 1],
            grimoire['score'],
            COLLECTIONS.get(DB_Q.grim_tally.exists())['grim_tally'],
            ghosts,
            COLLECTIONS.get(DB_Q.ghost_tally.exists())['ghost_tally'],
            len(found_frags),
            len(fragments),
            try_shorten('http://destinystatus.com/{}/{}/grimoire'.format(
                platform, links[console]
            ))
        ))

    return output
def steamcalc(text):
    """steamcalc <username> - Gets value of steam account. Uses steamcommunity.com/id/<nickname>."""
    username = text.strip().lower()

    try:
        data = get_data(username)
    except SteamError as e:
        return "{}".format(e)

    data["short_url"] = web.try_shorten(data["url"])

    template = ("\x02{name}\x02 has \x02{count:,}\x02 games with a total value of \x02{value}\x02"
                " (\x02{value_sales}\x02 during sales). \x02{count_unplayed:,}\x02"
                " (\x02{percent_unplayed}%\x02) have never been played - {short_url}")
    return template.format(**data)
def rotten_tomatoes(text, bot):
    """rt <title> -- gets ratings for <title> from Rotten Tomatoes"""
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return "No Rotten Tomatoes API key set."

    title = text.strip()

    search_response = requests.get(movie_search_url, params={'q': title, 'apikey': api_key})
    if search_response.status_code != requests.codes.ok:
        return "Error searching: {}".format(search_response.status_code)

    results = search_response.json()
    if results['total'] == 0:
        return 'No results.'

    movie = results['movies'][0]
    title = movie['title']
    movie_id = movie['id']
    critics_score = movie['ratings']['critics_score']
    audience_score = movie['ratings']['audience_score']
    url = web.try_shorten(movie['links']['alternate'])

    # -1 means the movie has no critic reviews at all.
    if critics_score == -1:
        return "\x02{}\x02 - Critics Rating: \x02No Reviews\x02, " \
               "Audience Rating: \x02{}%\x02 - {}".format(title, audience_score, url)

    review_response = requests.get(movie_reviews_url.format(movie_id),
                                   params={'review_type': 'all', 'apikey': api_key})
    if review_response.status_code != requests.codes.ok:
        return "Error searching: {}".format(review_response.status_code)

    review_count = review_response.json()['total']

    # Derive fresh/rotten counts from the percentage score.
    fresh = int(critics_score * review_count / 100)
    rotten = review_count - fresh

    return "\x02{}\x02 - Critics Rating: \x02{}%\x02 ({} liked, {} disliked), " \
           "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh,
                                                      rotten, audience_score, url)
def wolframalpha(text, bot):
    """wa <query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: missing api key"

    result = http.get_xml('http://api.wolframalpha.com/v2/query?format=plaintext',
                          input=text, appid=api_key)

    # browser-viewable version of this query
    query_url = ("http://www.wolframalpha.com/input/?i="
                 + http.quote_plus(text.encode('utf-8')))
    short_url = web.try_shorten(query_url)

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        if pod.attrib['id'] == 'Input':
            continue

        values = []
        for raw in pod.xpath('subpod/plaintext/text()'):
            cleaned = re.sub(r'\s+', ' ', raw.strip().replace('\\n', '; '))
            if cleaned:
                values.append(cleaned)

        if values:
            pod_texts.append(pod.attrib['title'] + ': ' + ', '.join(values))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)

    # strip backslash escapes, then decode \:xxxx unicode escapes
    ret = re.sub(r'\\(.)', r'\1', ret)

    def unicode_sub(match):
        return chr(int(match.group(1), 16))

    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)

    ret = formatting.truncate_str(ret, 250)
    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
def wolframalpha(text, bot):
    """<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: missing api key"

    response = requests.get(api_url, params={'input': text, 'appid': api_key})
    if response.status_code != requests.codes.ok:
        return "Error getting query: {}".format(response.status_code)

    result = etree.fromstring(response.content, parser=parser)

    # browser-viewable version of this query
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        if pod.attrib['id'] == 'Input':
            continue

        values = []
        for raw in pod.xpath('subpod/plaintext/text()'):
            cleaned = re.sub(r'\s+', ' ', raw.strip().replace('\\n', '; '))
            if cleaned:
                values.append(cleaned)

        if values:
            pod_texts.append(pod.attrib['title'] + ': ' + ', '.join(values))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)

    # I have no idea what this regex does.
    ret = re.sub(r'\\(.)', r'\1', ret)
    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
def lyricsnmusic(text, bot):
    """lyrics <artist and/or song> will fetch the first 150 characters of a song and a link to the full lyrics."""
    api_key = bot.config.get("api_keys", {}).get("lyricsnmusic")
    params = {"api_key": api_key, "q": text}

    response = requests.get(api_url, params=params)
    if response.status_code != 200:
        return "There was an error returned by the LyricsNMusic API."

    tracks = response.json()
    # BUG FIX: an empty result list used to crash with IndexError on [0].
    if not tracks:
        return "No results found."

    track = tracks[0]
    snippet = track["snippet"].replace("\r\n", " ")
    url = web.try_shorten(track["url"])
    title = track["title"]

    out = "\x02{}\x02 -- {} {}".format(title, snippet, url)
    # `viewable` flags whether the full lyrics page is available.
    if not track["viewable"]:
        out += " Full lyrics not available."
    return out
def spartist(text):
    """spartist <artist> -- Search Spotify for <artist>"""
    response = requests.get("http://ws.spotify.com/search/1/artist.json",
                            params={"q": text.strip()})
    if response.status_code != requests.codes.ok:
        return "Could not get artist information: {}".format(response.status_code)

    data = response.json()

    # href is a spotify URI like "spotify:artist:<id>"
    try:
        _type, _id = data["artists"][0]["href"].split(":")[1:]
    except IndexError:
        return "Could not find artist."

    link = web.try_shorten(gateway.format(_type, _id))
    return "\x02{}\x02 - {}".format(data["artists"][0]["name"], link)
def format_item(item, show_url=True):
    """Describe a Newegg API item dict as a one-line summary string."""
    title = formatting.truncate(item["Title"], 60)

    # rating summary, when any reviews exist ("[]" means none)
    reviews = item["ReviewSummary"]
    if reviews["TotalReviews"] == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(reviews["Rating"],
                                                  reviews["TotalReviews"][1:-1])

    # price, including the original price when discounted
    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    tags = ["\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02"]
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item.get("IsPremierItem"):
        tags.append("\x02Premier\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = ", ".join(tags)

    if not show_url:
        return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)

    # create the item URL and shorten it
    url = web.try_shorten(ITEM_URL.format(item["NeweggItemNumber"]))
    return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating, tag_text, url)
def drink(text, chan, action):
    """<nick>, makes the user a random cocktail."""
    # random.choice replaces the old randint-indexing; also avoids shadowing
    # this function's own name with a local called `drink`.
    recipe = random.choice(drinks)

    name = recipe['title']
    # strip the trailing " recipe" suffix the source data carries
    if name.endswith(' recipe'):
        name = name[:-len(' recipe')]

    url = web.try_shorten(recipe['url'])
    contents = recipe['ingredients']

    out = "grabs some"
    last = len(contents) - 1
    for i, ingredient in enumerate(contents):
        # BUG FIX: detect the final ingredient by position, not by value --
        # the old value comparison inserted " and " early whenever an
        # ingredient appeared twice in the list.
        if i == last:
            out += " and {}".format(ingredient)
        else:
            out += " {},".format(ingredient)

    out += "\x0F and makes {} a(n) \x02{}\x02. {}".format(text, name, url)
    action(out, chan)
def soundcloud(url, api_key):
    """Resolve a SoundCloud URL via the API and format the track for display."""
    query = urlencode({'url': url, 'client_id': api_key})
    data = http.get_json(api_url + '/resolve.json?' + query)

    # optional description / genre fragments
    if data['description']:
        desc = ": {} ".format(formatting.truncate_str(data['description'], 50))
    else:
        desc = ""
    genre = "- Genre: \x02{}\x02 ".format(data['genre']) if data['genre'] else ""

    link = web.try_shorten(data['permalink_url'])

    return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre,
        data['playback_count'], data['download_count'], data['comment_count'], link)
def spalbum(text):
    """spalbum <album> -- Search Spotify for <album>"""
    response = requests.get('http://ws.spotify.com/search/1/album.json',
                            params={'q': text.strip()})
    if response.status_code != requests.codes.ok:
        return "Could not get album information: {}".format(response.status_code)

    data = response.json()

    # href is a spotify URI like "spotify:album:<id>"
    try:
        _type, _id = data["albums"][0]["href"].split(":")[1:]
    except IndexError:
        return "Could not find album."

    album = data["albums"][0]
    link = web.try_shorten(gateway.format(_type, _id))
    return "\x02{}\x02 by \x02{}\x02 - {} URI: {}".format(album["name"],
                                                          album["artists"][0]["name"],
                                                          link, album["href"])
def sptfy(inp, sptfy=False):
    """Shorten a URL: via sptfy.com when `sptfy` is True, else via web.try_shorten.

    Best-effort: returns the original URL when sptfy.com is unreachable, or
    sptfy.com's own error message when it refuses to shorten.
    """
    if not sptfy:
        return web.try_shorten(inp)

    shortenurl = "http://sptfy.com/index.php"
    data = urlencode({'longUrl': inp, 'shortUrlDomain': 1, 'submitted': 1, "shortUrlFolder": 6, "customUrl": "",
                      "shortUrlPassword": "", "shortUrlExpiryDate": "", "shortUrlUses": 0, "shortUrlType": 0})
    try:
        soup = http.get_soup(shortenurl, post_data=data, cookies=True)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Fall back to the original URL.
        return inp
    try:
        link = soup.find('div', {'class': 'resultLink'}).text.strip()
        return link
    except Exception:
        # BUG FIX: bare `except:` narrowed to Exception. The result div is
        # missing, so surface sptfy.com's own error message instead.
        message = "Unable to shorten URL: {}".format(soup.find('div', {
            'class': 'messagebox_text'}).find('p').text.split("<br/>")[0])
        return message
def define(text):
    """<word> -- Returns a dictionary definition from Wordnik for <word>."""
    if not api_key:
        return "This command requires an API key from wordnik.com."

    word = sanitize(text)
    response = requests.get(
        API_URL + "word.json/{}/definitions".format(word),
        params={"api_key": api_key, "limit": 1},
    )
    # renamed local: the old code shadowed the stdlib `json` module name
    results = response.json()

    if not results:
        return "I could not find a definition for \x02{}\x02.".format(word)

    entry = results[0]
    entry["word"] = " ".join(entry["word"].split())
    entry["url"] = web.try_shorten(WEB_URL.format(entry["word"]))
    entry["attrib"] = ATTRIB_NAMES[entry["sourceDictionary"]]
    return "\x02{word}\x02: {text} - {url} ({attrib})".format(**entry)
def format_track(track, show_url=True):
    """Build the display string for a SoundCloud track item."""
    pieces = [track['title'], " by \x02{}\x02".format(track['user']['username'])]

    if track['genre']:
        pieces.append(" - \x02{}\x02".format(track['genre']))

    pieces.append(" - \x02{:,}\x02 plays, \x02{:,}\x02 favorites, \x02{:,}\x02 comments".format(
        track['playback_count'], track['favoritings_count'], track['comment_count']))

    if show_url:
        pieces.append(" - {}".format(web.try_shorten(track['permalink_url'])))

    return "".join(pieces)