def format_playlist(playlist, show_url=True):
    """Render a SoundCloud playlist item as a single formatted summary line."""
    result = "\x02{}\x02".format(playlist['title'])
    if playlist['description']:
        result += ': "{}"'.format(formatting.truncate(playlist['description']))
    if playlist['genre']:
        result += " - \x02{}\x02".format(playlist['genre'])
    result += " - by \x02{}\x02.".format(playlist['user']['username'])

    track_list = playlist['tracks']
    if track_list:
        result += " - {} items.".format(len(track_list))
    else:
        result += " - No items"

    # duration arrives in milliseconds; display whole seconds
    duration_secs = round(int(playlist['duration']) / 1000)
    result += " Running Time: {}.".format(timeformat.format_time(duration_secs, simple=True))
    result += " Playlist Type: \x02{}\x02.".format(playlist['type'])
    if show_url:
        result += " - {}".format(web.try_shorten(playlist['permalink_url']))
    return result
def format_game(app_id, show_url=True):
    """ Takes a Steam Store app ID and returns a formatted string with data about
    that app ID
    :type app_id: string
    :return: string
    """
    try:
        request = requests.get(API_URL, params={'appids': app_id}, timeout=15)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get game info: {}".format(e)

    game = request.json()[app_id]["data"]

    # name plus a shortened plain-text description
    parts = ["\x02{}\x02".format(game["name"])]
    about = " ".join(formatting.strip_html(game["about_the_game"]).split())
    parts.append(formatting.truncate(about, 75))

    # genre list - some apps have no genre data at all
    try:
        parts.append("\x02{}\x02".format(", ".join(g['description'] for g in game["genres"])))
    except KeyError:
        pass

    # release date (future or past)
    release = game['release_date']
    if release['coming_soon']:
        parts.append("coming \x02{}\x02".format(release['date']))
    else:
        parts.append("released \x02{}\x02".format(release['date']))

    # pricing - the Steam API sends prices as integer cents (e.g. 9999 == $19.99)
    if game['is_free']:
        parts.append("\x02free\x02")
    elif game.get("price_overview"):
        price = game['price_overview']
        if price['final'] == price['initial']:
            parts.append("\x02$%d.%02d\x02" % divmod(price['final'], 100))
        else:
            price_now = "$%d.%02d" % divmod(price['final'], 100)
            price_original = "$%d.%02d" % divmod(price['initial'], 100)
            parts.append("\x02{}\x02 (was \x02{}\x02)".format(price_now, price_original))
    # no price_overview and not free: probably unreleased, show nothing

    if show_url:
        parts.append(web.try_shorten(STORE_URL.format(game['steam_appid'])))

    return " - ".join(parts)
def format_output(data):
    """ takes plugin data and returns two strings representing information about that plugin """
    name = data["plugin_name"]
    description = formatting.truncate(data['description'], 30)
    url = data['website']

    author_list = data['authors']
    if author_list:
        # insert a zero-width space after the first character of the author
        # name (presumably to avoid pinging the author on IRC - unconfirmed)
        first_author = author_list[0]
        authors = first_author[0] + "\u200b" + first_author[1:]
    else:
        authors = "Unknown"

    stage = data['stage']

    latest = data['versions'][0]
    last_update = time.strftime('%d %B %Y %H:%M', time.gmtime(latest['date']))
    version_number = latest['version']
    bukkit_versions = ", ".join(latest['game_versions'])
    link = web.try_shorten(latest['link'])

    if description:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) - {}".format(name, authors, description, stage, url)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) - {}".format(name, authors, stage, url)
    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} - {}".format(version_number, bukkit_versions,
                                                                          last_update, link)
    return line_a, line_b
def pronounce(text):
    """<word> -- Returns instructions on how to pronounce <word> with an audio example."""
    if not api_key:
        return "This command requires an API key from wordnik.com."
    word = sanitize(text)

    # fetch up to five text pronunciations
    pron_url = API_URL + "word.json/{}/pronunciations".format(word)
    response = requests.get(pron_url, params={'api_key': api_key, 'limit': 5}).json()
    if not response:
        return "Sorry, I don't know how to pronounce \x02{}\x02.".format(word)
    out = "\x02{}\x02: ".format(word)
    out += " • ".join(entry['raw'] for entry in response)

    # try to attach a link to an audio sample as well
    audio_url = API_URL + "word.json/{}/audio".format(word)
    audio = requests.get(audio_url,
                         params={'api_key': api_key, 'limit': 1, 'useCanonical': 'false'}).json()
    if audio:
        out += " - {}".format(web.try_shorten(audio[0]['fileUrl']))

    return " ".join(out.split())
def issue(text):
    """<username|repo> [number] - gets issue [number]'s summary, or the open issue count if no issue is specified"""
    args = text.split()
    # expand configured repo shortcuts ("bot" -> "owner/bot", etc.)
    repo = shortcuts.get(args[0], args[0])
    issue_number = args[1] if len(args) > 1 else None

    if issue_number:
        r = requests.get('https://api.github.com/repos/{}/issues/{}'.format(repo, issue_number))
        j = r.json()

        url = web.try_shorten(j['html_url'], service='git.io')
        title = j['title']
        # the API returns null for issues with no description; guard before split
        summary = formatting.truncate((j['body'] or '').split('\n')[0], 200)
        if j['state'] == 'open':
            state = '\x033\x02Opened\x02\x0f by {}'.format(j['user']['login'])
        else:
            state = '\x034\x02Closed\x02\x0f by {}'.format(j['closed_by']['login'])

        return '{}: ({}) | {} | {}'.format(title, state, url, summary)

    r = requests.get('https://api.github.com/repos/{}/issues'.format(repo))
    j = r.json()

    count = len(j)
    # original used `count is 0` (identity, relies on CPython int caching)
    # and a pointless .format() on a literal with no placeholders
    if count == 0:
        return 'Repository has no open issues.'
    return 'Repository has {} open issues.'.format(count)
def lastfm_track(text, nick, db, bot, notice):
    """Grabs a list of the top tracks for a last.fm username"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "Error: This command requires a LastFM API key."

    # the usage hint was duplicated verbatim in two branches - keep one copy
    usage = "Please specify an artist and track title in the form artist name, track name."
    if not text:
        notice(usage, nick)
        return
    parts = text.split(',')
    if len(parts) < 2:
        notice(usage, nick)
        return
    artist = parts[0]
    track = parts[1]

    # build the request once; include the username (for userplaycount) when known
    params = {
        'api_key': api_key,
        'method': 'track.getInfo',
        'artist': artist,
        'track': track,
        'autocorrect': 1
    }
    username = get_account(nick)
    if username:
        params['username'] = username

    request = requests.get(api_url, params=params)
    if request.status_code != requests.codes.ok:
        return "Failed to fetch info ({})".format(request.status_code)
    response = request.json()
    if 'error' in response:
        return "Error: {}.".format(response["message"])

    track_name = response["track"]["name"]
    artist_name = response["track"]["artist"]["name"]
    album_name = response["track"]["album"]["title"]
    url = web.try_shorten(response["track"]["url"])
    listeners = response["track"]["listeners"]
    playcount = response["track"]["playcount"]

    # fixed the original's accidental double assignment (`out = out = ...`)
    out = "'{}' from the album {} by {} has been played {} times by {} listeners. {}".format(
        track_name, album_name, artist_name, playcount, listeners, url)
    if 'userplaycount' in response["track"]:
        userplaycount = response["track"]["userplaycount"]
        out = "'{}' from the album {} by {} has been played {} times by {} listeners. {} has listened {} times. {}".format(
            track_name, album_name, artist_name, playcount, listeners, username, userplaycount, url)
    return out
def imgur(text):
    """[search term] / [/r/subreddit] / [/user/username] / memes / random - returns a link to a random imgur image based on your input. if no input is given the bot will get an image from the imgur frontpage """
    text = text.strip().lower()

    if not imgur_api:
        return "No imgur API details"

    if text == "apicredits":
        return imgur_api.credits

    items, is_reddit = get_items(text)
    if not items:
        return "No results found."

    # items without a title can't be displayed nicely, so drop them
    items = [entry for entry in items if entry.title]

    random.shuffle(items)
    item = random.choice(items)

    # trim a trailing full stop from the title
    title = item.title[:-1] if item.title.endswith(".") else item.title

    # imgur memes carry extra metadata; for everything else the attribute
    # access raises AttributeError, which we simply ignore
    with suppress(AttributeError):
        title = "\x02{}\x02 - {}".format(item.meme_metadata["meme_name"].lower(), title)

    tags = []
    if item.section:
        tags.append(item.section)
    if item.nsfw:
        tags.append("nsfw")

    if is_reddit:
        # subreddit searches also get a link to the reddit comments
        reddit_url = web.try_shorten("http://reddit.com" + item.reddit_comments)
        url = "{} ({})".format(item.link, reddit_url)
    else:
        url = "{}".format(item.link)

    tag_str = "[\x02" + ("\x02, \x02".join(tags)) + "\x02] " if tags else ""
    return '{}"{}" - {}'.format(tag_str, title, url)
def qrcode(text):
    """<link> - returns a link to a QR code image for <link>"""
    query = urllib.parse.urlencode({
        "cht": "qr",       # chart type: QR code
        "chs": "200x200",  # image dimensions
        "chl": text,       # the data to encode
    })
    return web.try_shorten("http://chart.googleapis.com/chart?{}".format(query))
def format_group(group, show_url=True):
    """ Takes a SoundCloud group and returns a formatted string. """
    description = group['description']
    summary = "\x02{}\x02".format(group['name'])
    if description:
        summary += ': "{}"'.format(formatting.truncate(description))
    summary += " - Owned by \x02{}\x02.".format(group['creator']['username'])
    if show_url:
        summary += " - {}".format(web.try_shorten(group['permalink_url']))
    return summary
def format_repo(repo, show_url=True):
    """ Takes a GitHub repository and returns a formatted string. """
    out = repo['name']
    out += " (\x02{}\x02)".format(repo['owner']['login'])
    # BUG FIX: the format string has four placeholders (description, stars,
    # watchers, forks) but the original passed only three arguments, which
    # raises IndexError at runtime - pass forks_count as the fourth
    out += " \x12|\x12 {} \x12|\x12 {} stars \x12|\x12 {} watchers \x12|\x12 {} forks \x12|\x12 ".format(
        repo['description'], repo['stargazers_count'], repo['watchers_count'], repo['forks_count'])
    if show_url:
        out += " - {}".format(web.try_shorten(repo['html_url']))
    return out
def wolframalpha(text, bot):
    """<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "Error: This command requires a Wolfram Alpha API key."

    params = {
        'input': text,
        'appid': api_key
    }
    request = requests.get(api_url, params=params)

    if request.status_code != requests.codes.ok:
        return "Error getting query: {}".format(request.status_code)

    # the API returns XML; parse it with the module-level parser
    result = etree.fromstring(request.content, parser=parser)

    # get the URL for a user to view this query in a browser
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    # collect the plaintext of every primary result pod, skipping the pod
    # that merely echoes the user's input
    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            # turn literal "\n" sequences into "; " and collapse whitespace
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    ret = ' - '.join(pod_texts)

    if not pod_texts:
        return 'No results.'

    # strip the backslash from any backslash-escaped character
    # (e.g. "\:" becomes ":")
    ret = re.sub(r'\\(.)', r'\1', ret)

    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
def lastfm_artist(text, nick, db, bot, notice):
    """<artist> prints information about the specified artist"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "Error: This command requires a LastFM API key."

    if not text:
        # the original noticed the user but then fell through and queried the
        # API with an empty artist name - bail out instead
        notice("Please specify an artist.", nick)
        return
    artist = text

    # build the request once; include the username (for userplaycount) when known
    params = {
        'api_key': api_key,
        'method': 'artist.getInfo',
        'artist': artist,
        'autocorrect': 1
    }
    username = get_account(nick)
    if username:
        params['username'] = username

    request = requests.get(api_url, params=params)
    if request.status_code != requests.codes.ok:
        return "Failed to fetch info ({})".format(request.status_code)
    response = request.json()
    if 'error' in response:
        return "Error: {}.".format(response["message"])

    stats = response["artist"]["stats"]
    artist_name = response["artist"]["name"]
    url = web.try_shorten(response["artist"]["url"])
    listeners = stats["listeners"]
    playcount = stats["playcount"]

    # fixed the original's accidental double assignment (`out = out = ...`)
    out = "{} has been played {} times by {} listeners. {}".format(
        artist_name, playcount, listeners, url)
    if 'userplaycount' in stats:
        userplaycount = stats["userplaycount"]
        out = "'{}' has been played {} times by {} listeners. {} has listened {} times. {}".format(
            artist_name, playcount, listeners, username, userplaycount, url)
    return out
def format_item(item, show_url=True):
    """ takes a newegg API item object and returns a description """
    title = formatting.truncate(item["Title"], 60)

    # rating summary, when the item has reviews ("[]" means none)
    review = item["ReviewSummary"]
    if review["TotalReviews"] == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(review["Rating"],
                                                  review["TotalReviews"][1:-1])

    # price, showing the original price when the item is discounted
    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    tags = ["\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02"]
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item.get("IsPremierItem"):
        tags.append("\x02Premier\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = ", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_shorten(ITEM_URL.format(item["NeweggItemNumber"]))
        return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating, tag_text, url)
    return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)
def spartist(text):
    """spartist <artist> -- Search Spotify for <artist>"""
    request = requests.get('http://ws.spotify.com/search/1/artist.json',
                           params={'q': text.strip()})
    if request.status_code != requests.codes.ok:
        return "Could not get artist information: {}".format(request.status_code)
    data = request.json()

    try:
        # the href looks like "spotify:artist:<id>"
        kind, spotify_id = data["artists"][0]["href"].split(":")[1:]
    except IndexError:
        return "Could not find artist."

    short_url = web.try_shorten(gateway.format(kind, spotify_id))
    return "\x02{}\x02 - {}".format(data["artists"][0]["name"], short_url)
def format_track(track, show_url=True):
    """ Takes a SoundCloud track item and returns a formatted string. """
    summary = "{} by \x02{}\x02".format(track['title'], track['user']['username'])
    if track['genre']:
        summary += " - \x02{}\x02".format(track['genre'])
    summary += " - \x02{:,}\x02 plays, \x02{:,}\x02 favorites, \x02{:,}\x02 comments".format(
        track['playback_count'], track['favoritings_count'], track['comment_count'])
    if show_url:
        summary += " - {}".format(web.try_shorten(track['permalink_url']))
    return summary
def recipe(text):
    """[term] - gets a recipe for [term], or gets a random recipe if no term is specified"""
    if text:
        # search for the term and pick a random front-page result
        try:
            request = requests.get(SEARCH_URL, params={'query': text.strip()})
            request.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
            return "Could not get recipe: {}".format(e)

        search = bs4.BeautifulSoup(request.text)
        container = search.find('div', {'class': 'found_results'})
        if not container:
            return "No results"
        results = container.find_all('div', {'class': 'recipe_result'})
        chosen = random.choice(results)
        # extract the recipe URL from the chosen result
        url = BASE_URL + chosen.find('div', {'class': 'image-wrapper'}).find('a')['href']
    else:
        # no search term: let the site redirect us to a random recipe
        try:
            request = requests.get(RANDOM_URL)
            request.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
            return "Could not get recipe: {}".format(e)
        url = request.url

    # use get_data() to get the recipe info from the URL
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)

    return "Try eating \x02{}!\x02 - {}".format(data.name.strip(), web.try_shorten(url))
def define(text):
    """<word> -- Returns a dictionary definition for <word>."""
    if not api_key:
        return "This command requires an API key from wordnik.com."
    word = sanitize(text)

    url = API_URL + "word.json/{}/definitions".format(word)
    response = requests.get(url, params={'api_key': api_key, 'limit': 1}).json()
    if not response:
        return "I could not find a definition for \x02{}\x02.".format(word)

    entry = response[0]
    entry['word'] = " ".join(entry['word'].split())  # collapse whitespace
    entry['url'] = web.try_shorten(WEB_URL.format(entry['word']))
    entry['attrib'] = ATTRIB_NAMES[entry['sourceDictionary']]
    return "\x02{word}\x02: {text} - {url} ({attrib})".format(**entry)
def format_user(user, show_url=True):
    """ Takes a SoundCloud user item and returns a formatted string. """
    summary = "\x02{}\x02".format(user['username'])
    if user['description']:
        summary += ': "{}"'.format(formatting.truncate(user['description']))
    if user['city']:
        summary += ': {}'.format(user['city'])
    if user['country']:
        summary += ", {}".format(formatting.truncate(user['country']))
    summary += (" - \x02{track_count:,}\x02 tracks, \x02{playlist_count:,}\x02 playlists, "
                "\x02{followers_count:,}\x02 followers, "
                "\x02{followings_count:,}\x02 followed").format(**user)
    if show_url:
        summary += " - {}".format(web.try_shorten(user['permalink_url']))
    return summary
def snopes(text):
    """snopes <topic> -- Searches snopes for an urban legend about <topic>."""
    try:
        params = {'sp_q': text, 'sp_c': "1"}
        request = requests.get(search_url, params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    search_page = html.fromstring(request.text)
    result_urls = search_page.xpath("//a[@target='_self']/@href")
    if not result_urls:
        return "No matching pages found."

    try:
        _request = requests.get(result_urls[0])
        _request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    snopes_page = html.fromstring(_request.text)
    snopes_text = snopes_page.text_content()

    # the original called .group(0) directly on the search results, which
    # raises AttributeError whenever the page layout doesn't match - guard both
    claim_match = re.search(r"Claim: .*", snopes_text)
    if claim_match is None:
        return "No matching pages found."
    claim = claim_match.group(0).strip()

    status = re.search(r"Status: .*", snopes_text)
    if status is not None:
        status = status.group(0).strip()
    else:
        # new-style statuses
        verdict = re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED", snopes_text)
        if verdict is None:
            return "No matching pages found."
        status = "Status: {}".format(verdict.group(0).title())
    status = " ".join(status.split())  # compress whitespace

    claim = formatting.truncate(" ".join(claim.split()), 150)
    url = web.try_shorten(result_urls[0])
    return '"{}" {} - {}'.format(claim, status, url)
def dinner():
    """- TELLS YOU WHAT THE F**K YOU SHOULD MAKE FOR DINNER"""
    try:
        request = requests.get(RANDOM_URL)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "I CANT GET A DAMN RECIPE: {}".format(e).upper()
    url = request.url

    try:
        data = get_data(url)
    except ParseError as e:
        return "I CANT READ THE F**KING RECIPE: {}".format(e).upper()

    name = data.name.strip().upper()
    text = random.choice(PHRASES).format(name)
    if CENSOR:
        # BUG FIX: the original replaced "F**K" with itself (a no-op), so the
        # CENSOR flag had no effect; censor the uncensored word instead
        text = text.replace("FUCK", "F**K")
    return "{} - {}".format(text, web.try_shorten(url))
def validate(text):
    """validate <url> -- Runs url through the w3c markup validator."""
    text = text.strip()
    # default to http:// when no scheme was supplied
    if not urllib.parse.urlparse(text).scheme:
        text = "http://" + text

    request = requests.get('http://validator.w3.org/check', params={'uri': text})
    info = request.headers
    url = web.try_shorten(request.url)

    # removed a leftover debug print(status) from the original
    status = info['x-w3c-validator-status'].lower()
    if status in ("valid", "invalid"):
        # BUG FIX: headers are strings, so the original's `count == 1`
        # pluralisation check never matched; convert to int first
        error_count = int(info['x-w3c-validator-errors'])
        warning_count = int(info['x-w3c-validator-warnings'])
        return "{} was found to be {} with {} error{} and {} warning{}" \
               " - {}".format(text, status, error_count, "s"[error_count == 1:],
                              warning_count, "s"[warning_count == 1:], url)
    elif status == "abort":
        return "Invalid input."
def amazon(text, _parsed=False):
    """<query> -- Searches Amazon for query"""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Referer': 'http://www.amazon.com/'
    }
    params = {
        'url': 'search-alias',
        'field-keywords': text.strip()
    }
    if _parsed:
        # input is from a link parser, we need a specific URL
        request = requests.get(SEARCH_URL.format(_parsed), params=params, headers=headers)
    else:
        request = requests.get(SEARCH_URL.format(REGION), params=params, headers=headers)

    soup = BeautifulSoup(request.text)

    results = soup.find('div', {'id': 'atfResults'})
    if not results:
        if not _parsed:
            return "No results found."
        else:
            # called from a link parser - stay silent when nothing matches
            return

    results = results.find('ul', {'id': 's-results-list-atf'}).find_all('li', {'class': 's-result-item'})
    item = results[0]
    asin = item['data-asin']

    # here we use dirty html scraping to get everything we need
    title = formatting.truncate(item.find('h2', {'class': 's-access-title'}).text, 60)
    tags = []

    # tags!
    if item.find('i', {'class': 'a-icon-prime'}):
        tags.append("$(b)Prime$(b)")

    if item.find('i', {'class': 'sx-bestseller-badge-primary'}):
        tags.append("$(b)Bestseller$(b)")

    # "free shipping" phrasing differs across Amazon regional sites
    if re.search(r"(Kostenlose Lieferung|Livraison gratuite|FREE Shipping|Envío GRATIS"
                 r"|Spedizione gratuita)", item.text, re.I):
        tags.append("$(b)Free Shipping$(b)")

    price = item.find('span', {'class': ['s-price', 'a-color-price']}).text

    # use a whole lot of BS4 and regex to get the ratings
    try:
        # get the rating
        pattern = re.compile(r'(product-reviews|#customerReviews)')
        rating = item.find('i', {'class': 'a-icon-star'}).find('span', {'class': 'a-icon-alt'}).text
        rating = re.search(r"([0-9]+(?:(?:\.|,)[0-9])?).*5", rating).group(1).replace(",", ".")
        num_ratings = item.find('a', {'href': pattern}).text.replace(".", ",")
        rating_str = "{}/5 stars ({} ratings)".format(rating, num_ratings)
    except AttributeError:
        # one of the rating elements was missing from the page
        rating_str = "No Ratings"

    # generate a short url
    if AFFILIATE_TAG:
        url = "http://www.amazon.com/dp/" + asin + "/?tag=" + AFFILIATE_TAG
    else:
        url = "http://www.amazon.com/dp/" + asin + "/"
    url = web.try_shorten(url)

    tag_str = " - " + ", ".join(tags) if tags else ""

    if not _parsed:
        return colors.parse("$(b){}$(b) ({}) - {}{} - {}".format(title, price, rating_str, tag_str, url))
    else:
        return colors.parse("$(b){}$(b) ({}) - {}{}".format(title, price, rating_str, tag_str))
def lmgtfy(text):
    """[phrase] - gets a lmgtfy.com link for the specified phrase"""
    url = "http://lmgtfy.com/?q={}".format(requests.utils.quote(text))
    return web.try_shorten(url)
def format_item(item):
    """Render a feed item as 'title (short url)'."""
    short_url = web.try_shorten(item.link)
    clean_title = formatting.strip_html(item.title)
    return "{} ({})".format(clean_title, short_url)
def lastfm(text, nick, db, bot, notice):
    """[user] [dontsave] - displays the now playing (or last played) track of LastFM user [user]"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "Error: This command requires a LastFM API key."

    # check if the user asked us not to save his details
    dontsave = text.endswith(" dontsave")
    if dontsave:
        # strip the trailing " dontsave" (9 characters) from the input
        user = text[:-9].strip().lower()
    else:
        user = text

    if not user:
        # no user given: fall back to the account saved for this nick
        user = get_account(nick)
        if not user:
            notice(lastfm.__doc__)
            return

    params = {'method': 'user.getrecenttracks',
              'api_key': api_key, 'user': user, 'limit': 1}
    request = requests.get(api_url, params=params)

    if request.status_code != requests.codes.ok:
        return "Failed to fetch info ({})".format(request.status_code)

    response = request.json()

    if 'error' in response:
        return "Last.FM Error: {}.".format(response["message"])

    if "track" not in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = 'is listening to the song'
        ending = ''
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = 'last listened to the song'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timeformat.time_since(time_listened)
        ending = ' ({} ago)'.format(time_since)
    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]
    url = web.try_shorten(track["url"])

    out = '{} {} "{}"'.format(user, status, title)
    if artist:
        out += " by \x02{}\x0f".format(artist)
    if album:
        out += " from the album \x02{}\x0f".format(album)
    if url:
        out += " - {}".format(url)

    # append ending based on what type it was
    out += ending

    if text and not dontsave:
        # a user was named explicitly (and not marked dontsave): remember it
        db.execute("insert or replace into lastfm(nick, acc) values (:nick, :account)",
                   {'nick': nick.lower(), 'account': user})
        db.commit()
        load_cache(db)
    return out