def test_truncate_str():
    assert truncate(test_truncate_str_input, length=test_truncate_str_length_a) == test_truncate_str_result_a
    assert truncate(test_truncate_str_input, length=test_truncate_str_length_b) == test_truncate_str_result_b

    # compatibility
    assert truncate_str(test_truncate_str_input, length=test_truncate_str_length_a) == test_truncate_str_result_a
    assert truncate_str(test_truncate_str_input, length=test_truncate_str_length_b) == test_truncate_str_result_b
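
# For reference, a hypothetical set of fixture values this test could run
# against. Only the names come from the test above; the values, and the
# assumption that truncate() returns strings already under `length` unchanged
# and otherwise cuts at a word boundary and appends "...", are illustrative.
test_truncate_str_input = "the quick brown fox jumps over the lazy dog"
test_truncate_str_length_a = 20
test_truncate_str_result_a = "the quick brown fox..."
test_truncate_str_length_b = 100
test_truncate_str_result_b = "the quick brown fox jumps over the lazy dog"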
def format_output(data):
    """ takes plugin data and returns two strings representing information about that plugin """
    name = data["plugin_name"]
    description = formatting.truncate(data['description'], 30)
    url = data['website']
    if data['authors']:
        authors = data['authors'][0]
        # insert a zero-width space after the first character so IRC clients
        # don't highlight/ping the author when the line is posted
        authors = authors[0] + "\u200b" + authors[1:]
    else:
        authors = "Unknown"

    stage = data['stage']

    current_version = data['versions'][0]

    last_update = time.strftime('%d %B %Y %H:%M', time.gmtime(current_version['date']))
    version_number = data['versions'][0]['version']

    bukkit_versions = ", ".join(current_version['game_versions'])
    link = web.try_shorten(current_version['link'])

    if description:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) - {}".format(name, authors, description, stage, url)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) - {}".format(name, authors, stage, url)

    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} - {}".format(version_number, bukkit_versions,
                                                                          last_update, link)

    return line_a, line_b
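
# A minimal usage sketch for format_output(). The plugin record below is
# made up; it carries only the keys the function reads.
sample_plugin = {
    "plugin_name": "Essentials",
    "description": "The essential plugin suite for Minecraft servers.",
    "website": "http://example.com/essentials",
    "authors": ["ExampleDev"],
    "stage": "stable",
    "versions": [{
        "date": 1420070400,  # unix timestamp of the release
        "version": "2.0",
        "game_versions": ["1.8"],
        "link": "http://example.com/essentials/download/2.0",
    }],
}

# note: this goes through web.try_shorten(), so it touches the network
line_a, line_b = format_output(sample_plugin)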
def format_playlist(playlist, show_url=True):
    """ Takes a SoundCloud playlist item and returns a formatted string. """
    out = "\x02{}\x02".format(playlist['title'])

    if playlist['description']:
        out += ': "{}"'.format(formatting.truncate(playlist['description']))

    if playlist['genre']:
        out += " - \x02{}\x02".format(playlist['genre'])

    out += " - by \x02{}\x02.".format(playlist['user']['username'])

    if not playlist['tracks']:
        out += " - No items."
    else:
        out += " - {} items.".format(len(playlist['tracks']))

        # the API reports duration in milliseconds
        seconds = round(int(playlist['duration']) / 1000)
        out += " Running Time: {}.".format(timeformat.format_time(seconds, simple=True))

    out += " Playlist Type: \x02{}\x02.".format(playlist['type'])

    if show_url:
        out += " - {}".format(web.try_shorten(playlist['permalink_url']))

    return out
def format_game(app_id, show_url=True):
    """
    Takes a Steam Store app ID and returns a formatted string with data about that app ID
    :type app_id: str
    :return: str
    """
    params = {'appids': app_id}

    try:
        request = requests.get(API_URL, params=params, timeout=15)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get game info: {}".format(e)

    data = request.json()
    game = data[app_id]["data"]

    # basic info
    out = ["\x02{}\x02".format(game["name"])]

    desc = " ".join(formatting.strip_html(game["about_the_game"]).split())
    out.append(formatting.truncate(desc, 75))

    # genres
    try:
        genres = ", ".join([g['description'] for g in game["genres"]])
        out.append("\x02{}\x02".format(genres))
    except KeyError:
        # some things have no genre
        pass

    # release date
    if game['release_date']['coming_soon']:
        out.append("coming \x02{}\x02".format(game['release_date']['date']))
    else:
        out.append("released \x02{}\x02".format(game['release_date']['date']))

    # pricing
    if game['is_free']:
        out.append("\x02free\x02")
    elif not game.get("price_overview"):
        # game has no pricing, it's probably not released yet
        pass
    else:
        price = game['price_overview']

        # the Steam API sends prices as an integer number of cents, e.g. 1999
        # for $19.99; divmod splits that into dollars and cents
        if price['final'] == price['initial']:
            out.append("\x02$%d.%02d\x02" % divmod(price['final'], 100))
        else:
            price_now = "$%d.%02d" % divmod(price['final'], 100)
            price_original = "$%d.%02d" % divmod(price['initial'], 100)
            out.append("\x02{}\x02 (was \x02{}\x02)".format(price_now, price_original))

    if show_url:
        url = web.try_shorten(STORE_URL.format(game['steam_appid']))
        out.append(url)

    return " - ".join(out)
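
# The divmod in the pricing branch is plain integer division with remainder,
# so a price of 1999 cents formats as expected:
assert divmod(1999, 100) == (19, 99)  # (dollars, cents)
assert "$%d.%02d" % divmod(1999, 100) == "$19.99"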
def issue(text):
    """<username|repo> [number] - gets issue [number]'s summary, or the open issue count if no issue is specified"""
    args = text.split()
    repo = args[0] if args[0] not in shortcuts else shortcuts[args[0]]
    issue_num = args[1] if len(args) > 1 else None

    if issue_num:
        r = requests.get('https://api.github.com/repos/{}/issues/{}'.format(repo, issue_num))
        j = r.json()

        url = web.try_shorten(j['html_url'], service='git.io')
        number = j['number']
        title = j['title']
        summary = formatting.truncate(j['body'].split('\n')[0], 200)
        if j['state'] == 'open':
            state = '\x033\x02Opened\x02\x0f by {}'.format(j['user']['login'])
        else:
            state = '\x034\x02Closed\x02\x0f by {}'.format(j['closed_by']['login'])

        return '{}: ({}) | {} | {}'.format(title, state, url, summary)
    else:
        r = requests.get('https://api.github.com/repos/{}/issues'.format(repo))
        j = r.json()

        count = len(j)
        if count == 0:
            return 'Repository has no open issues.'
        else:
            return 'Repository has {} open issues.'.format(count)
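
# To make the argument handling concrete: a shortcut plus an issue number
# resolves like this. The shortcut table here is hypothetical; the real
# module defines its own.
shortcuts = {"cloudbot": "CloudBotIRC/CloudBot"}

args = "cloudbot 42".split()
repo = args[0] if args[0] not in shortcuts else shortcuts[args[0]]
issue_num = args[1] if len(args) > 1 else None
assert (repo, issue_num) == ("CloudBotIRC/CloudBot", "42")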
def drama(text):
    """<phrase> - gets the first paragraph of the Encyclopedia Dramatica article on <phrase>"""
    search_response = requests.get(api_url, params={"action": "opensearch", "search": text})

    if search_response.status_code != requests.codes.ok:
        return "Error searching: {}".format(search_response.status_code)

    data = search_response.json()

    if not data[1]:
        return "No results found."

    article_name = data[1][0].replace(' ', '_')

    url = ed_url + parse.quote(article_name, '')
    page_response = requests.get(url)

    if page_response.status_code != requests.codes.ok:
        return "Error getting page: {}".format(page_response.status_code)

    page = html.fromstring(page_response.text)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)  # strip footnote markers like [1]
            summary = formatting.truncate(summary, 220)
            return "{} - {}".format(summary, url)

    return "Unknown Error."
def format_user(user, show_url=True):
    """ Takes a SoundCloud user item and returns a formatted string. """
    out = "\x02{}\x02".format(user['username'])

    if user['description']:
        out += ': "{}"'.format(formatting.truncate(user['description']))

    if user['city']:
        out += ': {}'.format(user['city'])

    if user['country']:
        out += ", {}".format(formatting.truncate(user['country']))

    out += " - \x02{track_count:,}\x02 tracks, \x02{playlist_count:,}\x02 playlists, \x02{followers_count:,}\x02 " \
           "followers, \x02{followings_count:,}\x02 followed".format(**user)

    if show_url:
        out += " - {}".format(web.try_shorten(user['permalink_url']))

    return out
def bing(text, bot):
    """<query> - returns the first Bing search result for <query>"""
    api_key = bot.config.get("api_keys", {}).get("bing_azure")

    # handle NSFW
    show_nsfw = text.endswith(" nsfw")

    # remove "nsfw" from the input string after checking for it
    if show_nsfw:
        text = text[:-5].strip().lower()

    rating = NSFW_FILTER if show_nsfw else DEFAULT_FILTER

    if not api_key:
        return "Error: No Bing Azure API details."

    # why are these all differing formats and why does format have a $? ask microsoft
    params = {
        "Sources": bingify("web"),
        "Query": bingify(text),
        "Adult": bingify(rating),
        "$format": "json"
    }

    request = requests.get(API_URL, params=params, auth=(api_key, api_key))

    # I'm not even going to pretend to know why results are in ['d']['results'][0]
    j = request.json()['d']['results'][0]

    if not j["Web"]:
        return "No results."

    result = j["Web"][0]

    # not entirely sure this even needs un-escaping, but it won't hurt to leave it in
    title = formatting.truncate(unescape(result["Title"]), 60)
    desc = formatting.truncate(unescape(result["Description"]), 150)
    url = unescape(result["Url"])

    return colors.parse('{} -- $(b){}$(b): "{}"'.format(url, title, desc))
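
# For reference, the nesting the code assumes in the Azure response looks
# roughly like this. The shape is inferred from the parsing above, not taken
# from official documentation; all values are illustrative.
example_response = {
    "d": {
        "results": [{
            "Web": [{
                "Title": "Example Domain",
                "Description": "This domain is for use in illustrative examples.",
                "Url": "http://www.example.com/",
            }]
        }]
    }
}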
def format_group(group, show_url=True):
    """ Takes a SoundCloud group and returns a formatted string. """
    out = "\x02{}\x02".format(group['name'])

    if group['description']:
        out += ': "{}"'.format(formatting.truncate(group['description']))

    out += " - Owned by \x02{}\x02.".format(group['creator']['username'])

    if show_url:
        out += " - {}".format(web.try_shorten(group['permalink_url']))

    return out
def wolframalpha(text, bot):
    """<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return "Error: This command requires a Wolfram Alpha API key."

    params = {
        'input': text,
        'appid': api_key
    }
    request = requests.get(api_url, params=params)

    if request.status_code != requests.codes.ok:
        return "Error getting query: {}".format(request.status_code)

    result = etree.fromstring(request.content, parser=parser)

    # get the URL for a user to view this query in a browser
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    ret = ' - '.join(pod_texts)

    if not pod_texts:
        return 'No results.'

    # strip any remaining backslash escapes from the plaintext, e.g. "\:" -> ":"
    ret = re.sub(r'\\(.)', r'\1', ret)

    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
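
# The backslash-stripping substitution is straightforward once seen on a
# concrete string: r'\\(.)' matches a backslash plus the following character,
# and replacing with r'\1' keeps the character and drops the backslash.
assert re.sub(r'\\(.)', r'\1', r'x\:=\ 2') == 'x:= 2'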
def format_item(item, show_url=True):
    """ takes a newegg API item object and returns a description """
    title = formatting.truncate(item["Title"], 60)

    # format the rating nicely if it exists
    if item["ReviewSummary"]["TotalReviews"] != "[]":
        rating = "Rated {}/5 ({} ratings)".format(item["ReviewSummary"]["Rating"],
                                                  item["ReviewSummary"]["TotalReviews"][1:-1])
    else:
        rating = "No Ratings"

    if item["FinalPrice"] != item["OriginalPrice"]:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)
    else:
        price = item["FinalPrice"]

    tags = []

    if item["Instock"]:
        tags.append("\x02Stock Available\x02")
    else:
        tags.append("\x02Out Of Stock\x02")

    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")

    if item.get("IsPremierItem"):
        tags.append("\x02Premier\x02")

    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")

    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = ", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_shorten(ITEM_URL.format(item["NeweggItemNumber"]))
        return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating, tag_text, url)
    else:
        return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)
def ftbwiki(text):
    """<phrase> - gets the first paragraph of the FTB Wiki article on <phrase>."""
    try:
        request = requests.get(api_url, params={'search': text.strip()})
        request.raise_for_status()
        j = request.json()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + requests.utils.quote(article_name, '')

    try:
        request_ = requests.get(url)
        request_.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error fetching wiki page: {}".format(e)

    page = html.fromstring(request_.text)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)  # strip footnote markers like [1]
            summary = formatting.truncate(summary, 250)
            return "{} - {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
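
# Both this function and drama() above strip MediaWiki footnote markers with
# the same substitution; a quick check on a made-up summary string:
example_summary = "Redstone is a versatile crafting material.[1][2] It can carry power."
assert (re.sub(r"\[\d+\]", "", example_summary)
        == "Redstone is a versatile crafting material. It can carry power.")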
def snopes(text):
    """snopes <topic> -- Searches snopes for an urban legend about <topic>."""
    try:
        params = {'sp_q': text, 'sp_c': "1"}
        request = requests.get(search_url, params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    search_page = html.fromstring(request.text)
    result_urls = search_page.xpath("//a[@target='_self']/@href")

    if not result_urls:
        return "No matching pages found."

    try:
        _request = requests.get(result_urls[0])
        _request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    snopes_page = html.fromstring(_request.text)
    snopes_text = snopes_page.text_content()

    claim = re.search(r"Claim: .*", snopes_text).group(0).strip()

    status = re.search(r"Status: .*", snopes_text)
    if status is not None:
        status = status.group(0).strip()
    else:
        # new-style statuses
        status = "Status: {}".format(re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED",
                                               snopes_text).group(0).title())

    status = " ".join(status.split())  # compress whitespace
    claim = formatting.truncate(" ".join(claim.split()), 150)
    url = web.try_shorten(result_urls[0])

    return '"{}" {} - {}'.format(claim, status, url)
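
# The new-style status fallback just title-cases whichever keyword appears in
# the page text; for example, on a hypothetical page body:
example_text = "Example claim about an urban legend ... MIXTURE ..."
example_status = "Status: {}".format(
    re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED", example_text).group(0).title())
assert example_status == "Status: Mixture"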
def format_output(item, show_url=False):
    """ takes a reddit post and returns a formatted string """
    item["title"] = formatting.truncate(item["title"], 70)
    item["link"] = short_url.format(item["id"])

    raw_time = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timeformat.time_since(raw_time, count=1, simple=True)

    item["comments"] = formatting.pluralize(item["num_comments"], 'comment')
    item["points"] = formatting.pluralize(item["score"], 'point')

    if item["over_18"]:
        item["warning"] = " \x02NSFW\x02"
    else:
        item["warning"] = ""

    if show_url:
        return "\x02{title} : {subreddit}\x02 - {comments}, {points}" \
               " - \x02{author}\x02 {timesince} ago - {link}{warning}".format(**item)
    else:
        return "\x02{title} : {subreddit}\x02 - {comments}, {points}" \
               " - \x02{author}\x02, {timesince} ago{warning}".format(**item)
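
# A hedged usage sketch with a made-up post dict containing only the keys the
# function reads; it also relies on the module's short_url template.
sample_post = {
    "title": "Example post title",
    "id": "abc123",
    "created_utc": 1420070400,
    "num_comments": 2,
    "score": 15,
    "over_18": False,
    "subreddit": "programming",
    "author": "example_user",
}

print(format_output(sample_post, show_url=True))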
def suggest(text):
    """suggest <phrase> -- Gets suggested phrases for a google search"""
    params = {'output': 'json', 'client': 'hp', 'q': text}

    try:
        request = requests.get('http://google.com/complete/search', params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get suggestions: {}".format(e)

    page = request.text

    # the response is JSONP; strip the "callback(...)" wrapper to get plain JSON
    page_json = page.split('(', 1)[1][:-1]

    suggestions = json.loads(page_json)[1]
    suggestions = [suggestion[0] for suggestion in suggestions]

    if not suggestions:
        return 'No suggestions found.'

    out = formatting.strip_html(", ".join(suggestions))

    return formatting.truncate(out, 200)
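
# The split/slice unwrapping is easiest to follow on a sample JSONP payload.
# The wrapper name and suggestion values here are made up; only the shape matters.
example_page = 'callback(["cloud", [["cloudbot", 0], ["cloud computing", 1]]])'
example_json = example_page.split('(', 1)[1][:-1]  # drop "callback(" and the trailing ")"
example_suggestions = [s[0] for s in json.loads(example_json)[1]]
assert example_suggestions == ["cloudbot", "cloud computing"]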
def amazon(text, _parsed=False):
    """<query> -- Searches Amazon for query"""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Referer': 'http://www.amazon.com/'
    }
    params = {
        'url': 'search-alias',
        'field-keywords': text.strip()
    }
    if _parsed:
        # input is from a link parser, we need a specific URL
        request = requests.get(SEARCH_URL.format(_parsed), params=params, headers=headers)
    else:
        request = requests.get(SEARCH_URL.format(REGION), params=params, headers=headers)

    soup = BeautifulSoup(request.text)

    results = soup.find('div', {'id': 'atfResults'})

    if not results:
        if not _parsed:
            return "No results found."
        else:
            return

    results = results.find('ul', {'id': 's-results-list-atf'}).find_all('li', {'class': 's-result-item'})
    item = results[0]
    asin = item['data-asin']

    # here we use dirty html scraping to get everything we need
    title = formatting.truncate(item.find('h2', {'class': 's-access-title'}).text, 60)
    tags = []

    # tags!
    if item.find('i', {'class': 'a-icon-prime'}):
        tags.append("$(b)Prime$(b)")

    if item.find('i', {'class': 'sx-bestseller-badge-primary'}):
        tags.append("$(b)Bestseller$(b)")

    if re.search(r"(Kostenlose Lieferung|Livraison gratuite|FREE Shipping|Envío GRATIS"
                 r"|Spedizione gratuita)", item.text, re.I):
        tags.append("$(b)Free Shipping$(b)")

    price = item.find('span', {'class': ['s-price', 'a-color-price']}).text

    # use a whole lot of BS4 and regex to get the ratings
    try:
        pattern = re.compile(r'(product-reviews|#customerReviews)')
        rating = item.find('i', {'class': 'a-icon-star'}).find('span', {'class': 'a-icon-alt'}).text
        rating = re.search(r"([0-9]+(?:(?:\.|,)[0-9])?).*5", rating).group(1).replace(",", ".")
        num_ratings = item.find('a', {'href': pattern}).text.replace(".", ",")
        rating_str = "{}/5 stars ({} ratings)".format(rating, num_ratings)
    except AttributeError:
        rating_str = "No Ratings"

    # generate a short url
    if AFFILIATE_TAG:
        url = "http://www.amazon.com/dp/" + asin + "/?tag=" + AFFILIATE_TAG
    else:
        url = "http://www.amazon.com/dp/" + asin + "/"
    url = web.try_shorten(url)

    tag_str = " - " + ", ".join(tags) if tags else ""

    if not _parsed:
        return colors.parse("$(b){}$(b) ({}) - {}{} - {}".format(title, price, rating_str, tag_str, url))
    else:
        return colors.parse("$(b){}$(b) ({}) - {}{}".format(title, price, rating_str, tag_str))
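
# The rating regex pulls the leading number out of locale-formatted
# "x out of 5" strings; for example, on a hypothetical German-locale string:
example_rating = "4,5 von 5 Sternen"
example_match = re.search(r"([0-9]+(?:(?:\.|,)[0-9])?).*5", example_rating)
assert example_match.group(1).replace(",", ".") == "4.5"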
def urban(text):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""
    headers = {
        "Referer": "http://m.urbandictionary.com"
    }

    if text:
        # clean and split the input
        text = text.lower().strip()
        parts = text.split()

        # if the last word is a number, set the ID to that number
        if parts[-1].isdigit():
            id_num = int(parts[-1])
            # remove the ID from the input string
            del parts[-1]
            text = " ".join(parts)
        else:
            id_num = 1

        # fetch the definitions
        try:
            params = {"term": text}
            request = requests.get(define_url, params=params, headers=headers)
            request.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
            return "Could not get definition: {}".format(e)

        page = request.json()

        if page['result_type'] == 'no_results':
            return 'Not found.'
    else:
        # get a random definition!
        try:
            request = requests.get(random_url, headers=headers)
            request.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
            return "Could not get definition: {}".format(e)

        page = request.json()
        id_num = None

    definitions = page['list']

    if id_num:
        # try getting the requested definition
        try:
            definition = definitions[id_num - 1]
            def_text = " ".join(definition['definition'].split())  # remove excess spaces
            def_text = formatting.truncate(def_text, 200)
        except IndexError:
            return 'Not found.'

        url = definition['permalink']
        output = "[{}/{}] {} - {}".format(id_num, len(definitions), def_text, url)
    else:
        definition = random.choice(definitions)
        def_text = " ".join(definition['definition'].split())  # remove excess spaces
        def_text = formatting.truncate(def_text, 200)

        name = definition['word']
        url = definition['permalink']
        output = "\x02{}\x02: {} - {}".format(name, def_text, url)

    return output
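
# The trailing-number handling means input like "swag 2" requests the second
# definition of "swag"; the parsing in isolation:
example_text = "swag 2"
example_parts = example_text.lower().strip().split()

if example_parts[-1].isdigit():
    example_id = int(example_parts[-1])
    del example_parts[-1]
    example_text = " ".join(example_parts)
else:
    example_id = 1

assert (example_text, example_id) == ("swag", 2)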