def format_output(item, show_url=False):
    """Take a voat post dict and return a one-line formatted summary string."""
    # Fall back to the link description when the post has no title.
    title_source = item["Title"] if item["Title"] else item["Linkdescription"]
    item["Title"] = formatting.truncate(title_source, 70)
    item["link"] = voat_fill_url.format(item["Subverse"], item["Id"])
    posted_at = isodate.parse_date(item['Date'])
    item["timesince"] = timeformat.time_since(posted_at, count=1, simple=True)
    item["comments"] = formatting.pluralize(item["CommentCount"], 'comment')
    item["points"] = formatting.pluralize(item["Likes"], 'point')
    # Type 2 posts are link submissions; flag them.
    item["warning"] = " \x02Link\x02" if item["Type"] == 2 else ""

    if show_url:
        fmt = ("\x02{Title} : {Subverse}\x02 - {comments}, {points}"
               " - \x02{Name}\x02 {timesince} ago - {link}{warning}")
    else:
        fmt = ("\x02{Title} : {Subverse}\x02 - {comments}, {points}"
               " - \x02{Name}\x02, {timesince} ago{warning}")
    return fmt.format(**item)
def format_output(item, show_url=False):
    """Build a one-line IRC-formatted summary for a voat post dict."""
    # Prefer the post title; fall back to the link description when empty.
    if item["Title"]:
        item["Title"] = formatting.truncate(item["Title"], 70)
    else:
        item["Title"] = formatting.truncate(item["Linkdescription"], 70)

    item["link"] = voat_fill_url.format(item["Subverse"], item["Id"])
    item["timesince"] = timeformat.time_since(
        isodate.parse_date(item["Date"]), count=1, simple=True
    )
    item["comments"] = formatting.pluralize(item["CommentCount"], "comment")
    item["points"] = formatting.pluralize(item["Likes"], "point")
    # Type 2 == link submission; flag it in the output.
    item["warning"] = " \x02Link\x02" if item["Type"] == 2 else ""

    template = (
        "\x02{Title} : {Subverse}\x02 - {comments}, {points}"
        " - \x02{Name}\x02 {timesince} ago - {link}{warning}"
        if show_url
        else "\x02{Title} : {Subverse}\x02 - {comments}, {points}"
             " - \x02{Name}\x02, {timesince} ago{warning}"
    )
    return template.format(**item)
def test_truncate_str():
    """Check truncate() and its legacy alias truncate_str() on known cases."""
    cases = [
        (test_truncate_str_length_a, test_truncate_str_result_a),
        (test_truncate_str_length_b, test_truncate_str_result_b),
    ]
    for length, expected in cases:
        assert truncate(test_truncate_str_input, length=length) == expected
        # compatibility: the old name must behave identically
        assert truncate_str(test_truncate_str_input, length=length) == expected
def get_wiki(params, show_url=False):
    """Query the Wikipedia opensearch API and return a short result string.

    :param params: dict of query parameters (mutated: "format" forced to json)
    :param show_url: append the article URL markup to the output when True
    """
    params["format"] = "json"
    try:
        request = requests.get(api_url, params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get Wikipedia page: {}".format(e)

    data = request.json()
    # opensearch returns [query, [titles], [snippets], [urls]]
    # FIX: idiomatic emptiness test instead of `not len(...)`
    if not data[1]:
        return 'No results found.'

    # skip past a disambiguation entry when the first snippet is one
    idx = 1 if "may refer to" in data[2][0] else 0
    title, snippet, url = data[1][idx], data[2][idx], data[3][idx]

    # remove disambiguation parenthetical from title
    if paren_re.search(title):
        title = paren_re.sub("", title)

    # avoid repeating the title when the snippet already contains it
    out = snippet if title.lower() in snippet.lower() else title + ": " + snippet
    out = formatting.truncate(out, 350)
    if show_url:
        out += " [div] [h3]{}[/h3]".format(
            requests.utils.quote(url, safe=":/%"))

    return out
def issue(text):
    """<username|repo> [number] - gets issue [number]'s summary, or the open issue count if no issue is specified"""
    args = text.split()
    # expand configured repo shortcuts (e.g. "bot" -> "owner/bot")
    repo = args[0] if args[0] not in shortcuts else shortcuts[args[0]]
    issue = args[1] if len(args) > 1 else None
    if issue:
        r = requests.get('https://api.github.com/repos/{}/issues/{}'.format(
            repo, issue))
        r.raise_for_status()
        j = r.json()

        url = web.try_shorten(j['html_url'], service='git.io')
        number = j['number']
        title = j['title']
        # only the first line of the body, kept short
        summary = formatting.truncate(j['body'].split('\n')[0], 25)
        if j['state'] == 'open':
            state = '\x033\x02Opened\x02\x0f by {}'.format(j['user']['login'])
        else:
            state = '\x034\x02Closed\x02\x0f by {}'.format(
                j['closed_by']['login'])

        return 'Issue #{} ({}): {} | {}: {}'.format(number, state, url, title, summary)

    r = requests.get('https://api.github.com/repos/{}/issues'.format(repo))
    r.raise_for_status()
    j = r.json()
    count = len(j)
    # FIX: was `count is 0` — identity comparison with an int literal relies
    # on CPython small-int caching; use equality
    if count == 0:
        return 'Repository has no open issues.'

    return 'Repository has {} open issues.'.format(count)
def format_output(data):
    """Return two lines (info line, latest-release line) describing a plugin record."""
    plugin_name = data["plugin_name"]
    short_desc = formatting.truncate(data['description'], 30)
    site = data['website']
    stage = data['stage']

    author_list = data['authors']
    if author_list:
        # zero-width space stops IRC clients from highlighting the author
        authors = author_list[0][:1] + "\u200b" + author_list[0][1:]
    else:
        authors = "Unknown"

    latest = data['versions'][0]
    released = time.strftime('%d %B %Y %H:%M', time.gmtime(latest['date']))
    release_ver = data['versions'][0]['version']
    game_vers = ", ".join(latest['game_versions'])
    short_link = web.try_shorten(latest['link'])

    if short_desc:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) - {}".format(
            plugin_name, authors, short_desc, stage, site)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) - {}".format(
            plugin_name, authors, stage, site)

    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} - {}".format(
        release_ver, game_vers, released, short_link)

    return line_a, line_b
def format_output(item, show_url=False):
    """Take a reddit post dict and return a one-line formatted string."""
    item["title"] = formatting.truncate(item["title"], 70)
    item["link"] = short_url.format(item["id"])
    item["timesince"] = timeformat.time_since(
        datetime.fromtimestamp(int(item["created_utc"])), count=1, simple=True
    )
    item["comments"] = formatting.pluralize(item["num_comments"], "comment")
    item["points"] = formatting.pluralize(item["score"], "point")
    # flag adult-rated posts
    item["warning"] = " \x02NSFW\x02" if item["over_18"] else ""

    if show_url:
        template = (
            "\x02{title} : {subreddit}\x02 - {comments}, {points}"
            " - \x02{author}\x02 {timesince} ago - {link}{warning}"
        )
    else:
        template = (
            "\x02{title} : {subreddit}\x02 - {comments}, {points}"
            " - \x02{author}\x02, {timesince} ago{warning}"
        )
    return template.format(**item)
def issue_cmd(text):
    """<username|repo> [number] - gets issue [number]'s summary, or the open issue count if no issue is specified"""
    args = text.split()
    # expand shortcut names before splitting into owner/repo
    target = shortcuts[args[0]] if args[0] in shortcuts else args[0]
    owner, repo = parse_url(target)
    issue = args[1] if len(args) > 1 else None

    if not issue:
        # no issue number given: report the open-issue count
        r = requests.get("https://api.github.com/repos/{}/{}/issues".format(
            owner, repo))
        r.raise_for_status()
        open_count = len(r.json())
        if open_count == 0:
            return "Repository has no open issues."
        return "Repository has {} open issues.".format(open_count)

    r = requests.get("https://api.github.com/repos/{}/{}/issues/{}".format(
        owner, repo, issue))
    r.raise_for_status()
    data = r.json()

    url = web.try_shorten(data["html_url"], service="git.io")
    number = data["number"]
    title = data["title"]
    # only the first line of the body, kept short
    summary = formatting.truncate(data["body"].split("\n")[0], 25)
    if data["state"] == "open":
        state = "\x033\x02Opened\x02\x0f by {}".format(data["user"]["login"])
    else:
        state = "\x034\x02Closed\x02\x0f by {}".format(data["closed_by"]["login"])

    return "Issue #{} ({}): {} | {}: {}".format(number, state, url, title, summary)
def format_output(item, show_url=False):
    """Take a reddit post dict and return a color-formatted summary string."""
    item["title"] = formatting.truncate(item["title"], 70)
    item["link"] = short_url.format(item["id"])
    created = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timeformat.time_since(created, count=1, simple=True)
    item["comments"] = formatting.pluralize_auto(item["num_comments"], 'comment')
    item["points"] = formatting.pluralize_auto(item["score"], 'point')
    # adult-rated posts get a bold red NSFW tag
    item["warning"] = colors.parse(" $(b, red)NSFW$(clear)") if item["over_18"] else ""
    item["url"] = " - " + item["link"] if show_url else ""

    template = colors.parse(
        "$(b){title} : {subreddit}$(b) - {comments}, {points}"
        " - $(b){author}$(b) {timesince} ago{url}{warning}")
    return template.format(**item)
def format_output(data):
    """Return two strings (summary line, latest-release line) for a plugin record."""
    latest = data['versions'][0]

    # zero-width space after the first character prevents IRC nick pings
    if data['authors']:
        raw_author = data['authors'][0]
        author_text = raw_author[:1] + "\u200b" + raw_author[1:]
    else:
        author_text = "Unknown"

    desc = formatting.truncate(data['description'], 30)
    release_stamp = time.strftime('%d %B %Y %H:%M', time.gmtime(latest['date']))
    supported = ", ".join(latest['game_versions'])
    release_link = web.try_shorten(latest['link'])

    if desc:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) - {}".format(
            data["plugin_name"], author_text, desc, data['stage'], data['website'])
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) - {}".format(
            data["plugin_name"], author_text, data['stage'], data['website'])

    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} - {}".format(
        data['versions'][0]['version'], supported, release_stamp, release_link)

    return line_a, line_b
def suggest(text, reply):
    """<phrase> - Gets suggested phrases for a google search"""
    params = {'output': 'json', 'client': 'hp', 'q': text}

    try:
        request = requests.get('http://google.com/complete/search', params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        reply("Could not get suggestions: {}".format(e))
        raise

    # response is JSONP: strip the leading "callback(" and the trailing ")"
    payload = json.loads(request.text.split('(', 1)[1][:-1])
    phrases = [entry[0] for entry in payload[1]]

    if not phrases:
        return 'No suggestions found.'

    return formatting.truncate(formatting.strip_html(", ".join(phrases)), 200)
def format_playlist(playlist, show_url=True):
    """Build a one-line description of a SoundCloud playlist item."""
    text = "\x02{}\x02".format(playlist['title'])
    if playlist['description']:
        text += ': "{}"'.format(formatting.truncate(playlist['description']))
    if playlist['genre']:
        text += " - \x02{}\x02".format(playlist['genre'])
    text += " - by \x02{}\x02".format(playlist['user']['username'])

    tracks = playlist['tracks']
    if not tracks:
        text += " - No items"
    else:
        text += " - {} items,".format(len(tracks))
        # duration is reported in milliseconds
        seconds = round(int(playlist['duration']) / 1000)
        text += " {}".format(timeformat.format_time(seconds, simple=True))

    if show_url:
        text += " - {}".format(web.try_shorten(playlist['permalink_url']))
    return text
def issue(text):
    """ghissue <username|repo> [number] - gets issue [number]'s summary from GitHub, or the open issue count if no issue is specified."""
    args = text.split()
    # expand configured repo shortcuts (e.g. "bot" -> "owner/bot")
    repo = args[0] if args[0] not in shortcuts else shortcuts[args[0]]
    issue = args[1] if len(args) > 1 else None
    if issue:
        r = requests.get('https://api.github.com/repos/{}/issues/{}'.format(repo, issue))
        # FIX: fail loudly on a bad repo/issue instead of a confusing
        # KeyError when indexing the JSON below (matches the sibling command)
        r.raise_for_status()
        j = r.json()

        url = web.try_shorten(j['html_url'], service='git.io')
        number = j['number']
        title = j['title']
        # only the first line of the body, kept short
        summary = formatting.truncate(j['body'].split('\n')[0], 25)
        if j['state'] == 'open':
            state = '\x033\x02Opened\x02\x0f by {}'.format(j['user']['login'])
        else:
            state = '\x034\x02Closed\x02\x0f by {}'.format(j['closed_by']['login'])

        return 'Issue #{} ({}): {} | {}: {}'.format(number, state, url, title, summary)
    else:
        r = requests.get('https://api.github.com/repos/{}/issues'.format(repo))
        r.raise_for_status()
        j = r.json()
        count = len(j)
        # FIX: was `count is 0` — identity comparison with an int literal;
        # use equality
        if count == 0:
            return 'Repository has no open issues.'
        else:
            return 'Repository has {} open issues.'.format(count)
def drama(text):
    """<phrase> - gets the first paragraph of the Encyclopedia Dramatica article on <phrase>"""
    search_response = requests.get(api_url, params={
        "action": "opensearch",
        "search": text
    })

    if search_response.status_code != requests.codes.ok:
        return "Error searching: {}".format(search_response.status_code)

    # opensearch returns [query, [titles], ...]
    data = search_response.json()
    if not data[1]:
        return "No results found."

    article_name = data[1][0].replace(' ', '_')

    url = ed_url + parse.quote(article_name, '')
    page_response = requests.get(url)

    if page_response.status_code != requests.codes.ok:
        return "Error getting page: {}".format(page_response.status_code)

    page = html.fromstring(page_response.text)

    # first non-empty paragraph of the article body
    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            # FIX: raw string — "\[\d+\]" in a plain literal uses invalid
            # escape sequences (DeprecationWarning; error in future Pythons)
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = formatting.truncate(summary, 220)
            return "{} - {}".format(summary, url)

    return "Unknown Error."
def get_info(url, show_url=True):
    """Fetch an SCPWiki page and return a formatted one-line summary string."""
    try:
        request = requests.get(url)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        raise SCPError("Error: Unable to fetch URL. ({})".format(e))

    contents = formatting.strip_html(request.text)

    try:
        item_id = re.findall("Item #: (.+?)\n", contents, re.S)[0]
        object_class = re.findall("Object Class: (.+?)\n", contents, re.S)[0]
        description = re.findall("Description: (.+?)\n", contents, re.S)[0]
    except IndexError:
        raise SCPError("Error: Invalid or unreadable SCP. Does this SCP exist?")

    description = formatting.truncate(description, 130)
    short_url = web.try_shorten(url)

    # look up the title in the pre-generated cache, if present
    title = scp_cache[item_id][0] if item_id in scp_cache else "Unknown"

    base = ("\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {},"
            " \x02Description:\x02 {}").format(title, item_id, object_class, description)
    if show_url:
        return "{} - {}".format(base, short_url)
    return base
def get_info(url, show_url=True):
    """Fetch an SCPWiki article and summarize it as a formatted string."""
    try:
        resp = requests.get(url)
        resp.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        raise SCPError("Error: Unable to fetch URL. ({})".format(e))

    text = formatting.strip_html(resp.text)

    def first_match(pattern):
        # raises IndexError when the field is absent from the page
        return re.findall(pattern, text, re.S)[0]

    try:
        item_id = first_match("Item #: (.+?)\n")
        object_class = first_match("Object Class: (.+?)\n")
        description = first_match("Description: (.+?)\n")
    except IndexError:
        raise SCPError("Error: Invalid or unreadable SCP. Does this SCP exist?")

    description = formatting.truncate(description, 130)
    short_url = web.try_shorten(url)

    # titles come from a pre-generated cache keyed by item id
    if item_id in scp_cache:
        title = scp_cache[item_id][0]
    else:
        title = "Unknown"

    if show_url:
        return "\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {}," \
               " \x02Description:\x02 {} - {}".format(title, item_id, object_class,
                                                      description, short_url)
    return "\x02Item Name:\x02 {}, \x02Item #:\x02 {}, \x02Class\x02: {}," \
           " \x02Description:\x02 {}".format(title, item_id, object_class, description)
def wiki(text, reply):
    """<phrase> - Gets first sentence of Wikipedia article on <phrase>."""
    try:
        with requests.get(search_url, params={'srsearch': text.strip()}) as response:
            response.raise_for_status()
            data = response.json()
    except RequestException:
        reply("Could not get Wikipedia page")
        raise

    # pick the first "standard" page among the search hits
    for result in data['query']['search']:
        title = result['title']
        info = get_info(title)
        if info['type'] == 'standard':
            desc = info['extract']
            url = info['content_urls']['desktop']['page']
            break
    else:
        return "No results found."

    desc = formatting.truncate(desc, 200) if desc else "(No Summary)"
    return '{} :: {} :: {}'.format(title, desc, url)
def format_playlist(playlist, show_url=True):
    """Render a SoundCloud playlist item as a single descriptive string."""
    bits = ["\x02{}\x02".format(playlist['title'])]
    if playlist['description']:
        bits.append(': "{}"'.format(formatting.truncate(playlist['description'])))
    if playlist['genre']:
        bits.append(" - \x02{}\x02".format(playlist['genre']))
    bits.append(" - by \x02{}\x02".format(playlist['user']['username']))

    if not playlist['tracks']:
        bits.append(" - No items")
    else:
        bits.append(" - {} items,".format(len(playlist['tracks'])))
        # API reports the duration in milliseconds
        duration = round(int(playlist['duration']) / 1000)
        bits.append(" {}".format(timeformat.format_time(duration, simple=True)))

    if show_url:
        bits.append(" - {}".format(web.try_shorten(playlist['permalink_url'])))
    return "".join(bits)
def drama(text):
    """<phrase> - gets the first paragraph of the Encyclopedia Dramatica article on <phrase>"""
    search_response = requests.get(api_url, params={"action": "opensearch", "search": text})
    if search_response.status_code != requests.codes.ok:
        return "Error searching: {}".format(search_response.status_code)

    # opensearch returns [query, [titles], ...]
    data = search_response.json()
    if not data[1]:
        return "No results found."

    article_name = data[1][0].replace(' ', '_')

    url = ed_url + parse.quote(article_name, '')
    page_response = requests.get(url)
    if page_response.status_code != requests.codes.ok:
        return "Error getting page: {}".format(page_response.status_code)

    page = html.fromstring(page_response.text)

    # first non-empty paragraph of the article body
    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            # FIX: raw string — "\[\d+\]" in a plain literal uses invalid
            # escape sequences (DeprecationWarning; error in future Pythons)
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = formatting.truncate(summary, 220)
            return "{} - {}".format(summary, url)

    return "Unknown Error."
def bing(text, bot, reply):
    """<query> - returns the first bing search result for <query>"""
    api_key = bot.config.get("api_keys", {}).get("bing_azure")

    # handle NSFW: strip a trailing " nsfw" and relax the content filter
    show_nsfw = text.endswith(" nsfw")
    if show_nsfw:
        text = text[:-5].strip().lower()
    rating = NSFW_FILTER if show_nsfw else DEFAULT_FILTER

    if not api_key:
        return "Error: No Bing Azure API details."

    # why are these all differing formats and why does format have a $? ask microsoft
    params = {
        "Sources": bingify("web"),
        "Query": bingify(text),
        "Adult": bingify(rating),
        "$format": "json"
    }

    request = requests.get(API_URL, params=params, auth=(api_key, api_key))
    try:
        request.raise_for_status()
    except HTTPError:
        reply("Bing API error occurred.")
        raise

    # results live at ['d']['results'][0] for whatever reason
    payload = request.json()['d']['results'][0]

    if not payload["Web"]:
        return "No results."

    first = payload["Web"][0]

    # un-escaping may be unnecessary here, but it is harmless
    title = formatting.truncate(unescape(first["Title"]), 60)
    desc = formatting.truncate(unescape(first["Description"]), 150)
    url = unescape(first["Url"])
    return colors.parse(
        '\x02Notice: The Bing API will stop working sometime soon because Microsoft is greedy as f**k and is removing the free search tier.\x02 {} -- $(b){}$(b): "{}"'.format(
            url, title, desc))
def format_game(app_id, show_url=True):
    """
    Takes a Steam Store app ID and returns a formatted string with data about that app ID
    :type app_id: string
    :return: string
    """
    try:
        data = http.get_json(API_URL, params={'appids': app_id}, timeout=15)
    except Exception as e:
        return f"Could not get game info: {e}"

    game = data[app_id]["data"]

    # name plus a whitespace-normalized, truncated description
    pieces = ["\x02{}\x02".format(game["name"])]
    about = " ".join(formatting.strip_html(game["about_the_game"]).split())
    pieces.append(formatting.truncate(about, 75))

    # genres (some entries have no genre at all)
    try:
        pieces.append("\x02{}\x02".format(
            ", ".join(g['description'] for g in game["genres"])))
    except KeyError:
        pass

    # release date
    date_word = "coming" if game['release_date']['coming_soon'] else "released"
    pieces.append("{} \x02{}\x02".format(date_word, game['release_date']['date']))

    # pricing: the API sends integer cents, e.g. 9999 -> $19.99
    if game['is_free']:
        pieces.append("\x02free\x02")
    elif game.get("price_overview"):
        price = game['price_overview']
        if price['final'] == price['initial']:
            pieces.append("\x02$%d.%02d\x02" % divmod(price['final'], 100))
        else:
            price_now = "$%d.%02d" % divmod(price['final'], 100)
            price_original = "$%d.%02d" % divmod(price['initial'], 100)
            pieces.append("\x02{}\x02 (was \x02{}\x02)".format(price_now, price_original))
    # no price data and not free: probably unreleased, show nothing

    if show_url:
        pieces.append(web.try_shorten(STORE_URL.format(game['steam_appid'])))

    return " - ".join(pieces)
def sanitize(definition):
    """Strip HTML from a definition string and normalize its whitespace.

    <strong> tags become IRC bold markers, <br> tags become spaces, any other
    tag is dropped. The result is truncated to 380 characters.
    """
    def_text = re.sub("<strong>|</strong>", "\x02", definition)
    def_text = re.sub("<br />|<br>", " ", def_text)
    def_text = re.sub("<.*?>", " ", def_text)
    # FIX: raw string — "\s" is an invalid escape sequence in a plain literal
    # (DeprecationWarning; a SyntaxError in future Pythons)
    def_text = re.sub(r"\s+", " ", def_text)

    # join the remaining lines, stripping stray edge whitespace
    # (renamed `l` -> `lines`: single-letter `l` is ambiguous, PEP 8 E741)
    lines = def_text.splitlines()
    stripped = [item.strip() for item in lines]
    def_text = " ".join(stripped).strip()

    def_text = formatting.truncate(def_text, 380)
    return def_text
def wolframalpha(text, bot, reply):
    """<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: missing api key"

    request = requests.get(api_url, params={'input': text, 'appid': api_key})

    try:
        request.raise_for_status()
    except HTTPError as e:
        reply("Error getting query: {}".format(e.response.status_code))
        raise

    if request.status_code != requests.codes.ok:
        return "Error getting query: {}".format(request.status_code)

    result = etree.fromstring(request.content, parser=parser)

    # get the URL for a user to view this query in a browser
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        if pod.attrib['id'] == 'Input':
            continue
        title = pod.attrib['title']

        entries = []
        for raw in pod.xpath('subpod/plaintext/text()'):
            cleaned = re.sub(r'\s+', ' ', raw.strip().replace('\\n', '; '))
            if cleaned:
                entries.append(cleaned)
        if entries:
            pod_texts.append(title + ': ' + ', '.join(entries))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)
    # un-escape backslash sequences in the result text
    ret = re.sub(r'\\(.)', r'\1', ret)
    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
def format_user(user, show_url=True):
    """Render a SoundCloud user item as a single descriptive string."""
    chunks = ["\x02{}\x02".format(user['username'])]
    if user['description']:
        chunks.append(': "{}"'.format(formatting.truncate(user['description'])))
    if user['city']:
        chunks.append(': {}'.format(user['city']))
    if user['country']:
        chunks.append(", {}".format(formatting.truncate(user['country'])))

    chunks.append(
        " - \x02{track_count:,}\x02 tracks, \x02{playlist_count:,}\x02 playlists, \x02{followers_count:,}\x02 "
        "followers, \x02{followings_count:,}\x02 followed".format(**user))

    if show_url:
        chunks.append(" - {}".format(web.try_shorten(user['permalink_url'])))
    return "".join(chunks)
def bing(text, bot):
    """<query> - returns the first bing search result for <query>"""
    api_key = bot.config.get("api_keys", {}).get("bing_azure")

    # strip a trailing " nsfw" token and relax the adult-content filter
    show_nsfw = text.endswith(" nsfw")
    if show_nsfw:
        text = text[:-5].strip().lower()
    rating = NSFW_FILTER if show_nsfw else DEFAULT_FILTER

    if not api_key:
        return "Error: No Bing Azure API details."

    # why are these all differing formats and why does format have a $? ask microsoft
    params = {
        "Sources": bingify("web"),
        "Query": bingify(text),
        "Adult": bingify(rating),
        "$format": "json"
    }

    response = requests.get(API_URL, params=params, auth=(api_key, api_key))

    # results are nested at ['d']['results'][0] for some reason
    body = response.json()['d']['results'][0]

    if not body["Web"]:
        return "No results."

    top = body["Web"][0]

    # un-escaping may be unnecessary here, but it is harmless
    title = formatting.truncate(unescape(top["Title"]), 60)
    desc = formatting.truncate(unescape(top["Description"]), 150)
    url = unescape(top["Url"])
    return colors.parse('{} -- $(b){}$(b): "{}"'.format(url, title, desc))
def format_group(group, show_url=True):
    """Render a SoundCloud group as a single descriptive string."""
    text = "\x02{}\x02".format(group['name'])
    description = group['description']
    if description:
        text += ': "{}"'.format(formatting.truncate(description))
    text += " - Owned by \x02{}\x02.".format(group['creator']['username'])
    if show_url:
        text += " - {}".format(web.try_shorten(group['permalink_url']))
    return text
def mcwiki(text, reply):
    """<phrase> - gets the first paragraph of the Minecraft Wiki article on <phrase>"""
    try:
        search = requests.get(api_url, params={'search': text.strip()})
        search.raise_for_status()
        hits = search.json()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        reply("Error fetching search results: {}".format(e))
        raise
    except ValueError as e:
        reply("Error reading search results: {}".format(e))
        raise

    if not hits[1]:
        return "No results found."

    # drop titles containing '/': gamepedia uses sub-pages for other
    # languages, which we don't want to link
    candidates = [title for title in hits[1] if "/" not in title]
    # fall back to a sub-page title when nothing else matched
    chosen = candidates[0] if candidates else hits[1][0]
    article_name = chosen.replace(' ', '_').encode('utf8')

    url = mc_url + requests.utils.quote(article_name, '')

    try:
        page_req = requests.get(url)
        page_req.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        reply("Error fetching wiki page: {}".format(e))
        raise

    page = html.fromstring(page_req.text)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r'\[\d+\]', '', summary)
            return "{} :: {}".format(formatting.truncate(summary, 200), url)

    # this shouldn't happen
    return "Unknown Error."
def amazon(text, _parsed=False):
    """<query> -- Searches Amazon for query"""
    # NOTE(review): this block appears truncated in this view — it ends right
    # after extracting `price` and never returns; confirm against full source.
    # Spoof a desktop browser so Amazon serves the normal result page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Referer': 'http://www.amazon.com/'
    }
    params = {
        'url': 'search-alias',
        'field-keywords': text.strip()
    }
    if _parsed:
        # input is from a link parser, we need a specific URL
        request = requests.get(SEARCH_URL.format(_parsed), params=params, headers=headers)
    else:
        request = requests.get(SEARCH_URL.format(REGION), params=params, headers=headers)

    soup = BeautifulSoup(request.text)

    # check if there are any results on the amazon page
    results = soup.find('div', {'id': 'atfResults'})
    if not results:
        if not _parsed:
            return "No results found."
        else:
            # called from a link parser: stay silent on no results
            return

    # get the first item from the results on the amazon page
    results = results.find('ul', {'id': 's-results-list-atf'}).find_all('li', {'class': 's-result-item'})
    item = results[0]
    asin = item['data-asin']

    # here we use dirty html scraping to get everything we need
    title = formatting.truncate(item.find('h2', {'class': 's-access-title'}).text, 60)
    tags = []

    # tags!
    if item.find('i', {'class': 'a-icon-prime'}):
        tags.append("$(b)Prime$(b)")

    if item.find('i', {'class': 'sx-bestseller-badge-primary'}):
        tags.append("$(b)Bestseller$(b)")

    # we use regex because we need to recognise text for this part
    # the other parts detect based on html tags, not text
    if re.search(r"(Kostenlose Lieferung|Livraison gratuite|FREE Shipping|Envío GRATIS"
                 r"|Spedizione gratuita)", item.text, re.I):
        tags.append("$(b)Free Shipping$(b)")

    price = item.find('span', {'class': ['s-price', 'a-color-price']}).text
def top_list(prefix, data, join_char=' • '):
    r"""
    >>> foods = [('Spam', 1), ('Eggs', 4)]
    >>> top_list("Top Foods: ", foods)
    'Top Foods: \x02E\u200bggs\x02: 4 • \x02S\u200bpam\x02: 1'
    """
    def render(pair):
        name, count = pair
        # zero-width space after the first character avoids IRC nick pings
        return "\x02{}\x02: {:,}".format(name[:1] + '\u200b' + name[1:], count)

    ranked = sorted(data, key=operator.itemgetter(1), reverse=True)
    return truncate(
        prefix + join_char.join(map(render, ranked)),
        sep=join_char,
        length=320,
    )
def wolframalpha(text, bot):
    """<query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: missing api key"

    request = requests.get(api_url, params={'input': text, 'appid': api_key})
    if request.status_code != requests.codes.ok:
        return "Error getting query: {}".format(request.status_code)

    result = etree.fromstring(request.content, parser=parser)

    # get the URL for a user to view this query in a browser
    short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        if pod.attrib['id'] == 'Input':
            continue
        title = pod.attrib['title']

        entries = []
        for raw in pod.xpath('subpod/plaintext/text()'):
            cleaned = re.sub(r'\s+', ' ', raw.strip().replace('\\n', '; '))
            if cleaned:
                entries.append(cleaned)
        if entries:
            pod_texts.append(title + ': ' + ', '.join(entries))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)
    # un-escape backslash sequences in the result text
    ret = re.sub(r'\\(.)', r'\1', ret)
    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
def sanitize(definition):
    """Strip boilerplate and markup from a dream-dictionary definition.

    Removes the "Sonhar com ..." headers, scripts and HTML tags, collapses
    whitespace, and truncates the result to 380 characters.
    """
    # FIX: all patterns are raw strings now — "\." and "\s" are invalid
    # escape sequences in plain string literals (DeprecationWarning;
    # a SyntaxError in future Pythons)
    def_text = re.sub(r"<p><p><strong>Sonhar com .+?</strong>", "", definition, flags=re.DOTALL)
    def_text = re.sub(r"Significado de sonhar com .+?\.", "", def_text)
    def_text = re.sub(r"Significado Sonhar com .+?<p>", "", def_text)
    def_text = re.sub(r"<script .+?</script>", "", def_text, flags=re.DOTALL)
    def_text = re.sub(r"<.*?>", " ", def_text, flags=re.DOTALL)
    def_text = re.sub(r"<br />|<br>", " ", def_text)
    def_text = re.sub(r"\s+?,", ",", def_text)
    def_text = re.sub(r"\s+", " ", def_text)
    def_text = re.sub(" ", " ", def_text)

    # join the remaining lines, stripping stray edge whitespace
    # (renamed `l` -> `lines`: single-letter `l` is ambiguous, PEP 8 E741)
    lines = def_text.splitlines()
    stripped = [item.strip() for item in lines]
    def_text = " ".join(stripped).strip()

    def_text = formatting.truncate(def_text, 380)
    return def_text
def format_item(item, show_url=True):
    """Describe a newegg API item object as a one-line string."""
    title = formatting.truncate(item["Title"], 60)

    # format the rating nicely if it exists
    review = item["ReviewSummary"]
    if review["TotalReviews"] == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(review["Rating"],
                                                  review["TotalReviews"][1:-1])

    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    tags = []
    tags.append("\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02")
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item.get("IsPremierItem"):
        tags.append("\x02Premier\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = ", ".join(tags)

    if not show_url:
        return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)

    # create the item URL and shorten it
    url = web.try_shorten(ITEM_URL.format(item["NeweggItemNumber"]))
    return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating, tag_text, url)
def soundcloud(url, api_key):
    """Resolve a SoundCloud track URL and return a formatted summary string."""
    query = urlencode({'url': url, 'client_id': api_key})
    data = http.get_json(api_url + '/resolve.json?' + query)

    desc = ""
    if data['description']:
        desc = ": {} ".format(formatting.truncate(data['description'], 50))

    genre = ""
    if data['genre']:
        genre = "- Genre: \x02{}\x02 ".format(data['genre'])

    short_link = web.try_shorten(data['permalink_url'])

    return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre, data['playback_count'],
        data['download_count'], data['comment_count'], short_link)
def format_item(item, show_url=True):
    """Build a one-line description string from a newegg API item object."""
    title = formatting.truncate(item["Title"], 60)

    # format the rating nicely if it exists
    total_reviews = item["ReviewSummary"]["TotalReviews"]
    if total_reviews == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(
            item["ReviewSummary"]["Rating"], total_reviews[1:-1])

    # show the original price too when the item is discounted
    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    # collect feature tags, stock status first
    tags = ["\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02"]
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item.get("IsPremierItem"):
        tags.append("\x02Premier\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    tag_text = ", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_shorten(ITEM_URL.format(item["NeweggItemNumber"]))
        return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating, tag_text, url)
    return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)
def mcwiki(text):
    """mcwiki <phrase> - gets the first paragraph of the Minecraft Wiki article on <phrase>"""
    try:
        request = requests.get(api_url, params={'search': text.strip()})
        request.raise_for_status()
        j = request.json()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + requests.utils.quote(article_name, '')

    try:
        request_ = requests.get(url)
        request_.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error fetching wiki page: {}".format(e)

    page = html.fromstring(request_.text)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            # strip footnote markers like "[1]"; the pattern must be a raw
            # string — "\[" and "\d" in a plain string are invalid escapes
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = formatting.truncate(summary, 200)
            return "{} :: {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
def snopes(text):
    """<topic> - Searches snopes for an urban legend about <topic>."""
    try:
        params = {'sp_q': text, 'sp_c': "1"}
        request = requests.get(search_url, params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    search_page = html.fromstring(request.text)
    result_urls = search_page.xpath("//a[@target='_self']/@href")

    if not result_urls:
        return "No matching pages found."

    try:
        _request = requests.get(result_urls[0])
        _request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    snopes_page = html.fromstring(_request.text)
    snopes_text = snopes_page.text_content()

    # guard against pages without a "Claim:" line — the old code called
    # .group(0) on the match directly and crashed with AttributeError
    claim_match = re.search(r"Claim: .*", snopes_text)
    if claim_match is None:
        return "No matching pages found."
    claim = claim_match.group(0).strip()

    status = re.search(r"Status: .*", snopes_text)
    if status is not None:
        status = status.group(0).strip()
    else:
        # new-style statuses; also guard against a missing verdict
        status_match = re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED", snopes_text)
        if status_match is None:
            return "No matching pages found."
        status = "Status: {}".format(status_match.group(0).title())

    status = " ".join(status.split())  # compress whitespace
    claim = formatting.truncate(" ".join(claim.split()), 150)
    url = web.try_shorten(result_urls[0])
    return '"{}" {} - {}'.format(claim, status, url)
def format_item(tld, item, show_url=True):
    """Render a newegg API item (Basic + Additional sections) as one description string."""
    additional = item['Additional']
    item = item['Basic']

    start = "[h1]Newegg:[/h1] {} [div] ".format(formatting.truncate(item["Title"], 160))
    parts = []

    # price: show the pre-sale price only when the item is discounted
    if item["FinalPrice"] == item["OriginalPrice"] or not item["OriginalPrice"]:
        price = "$(b){}$(b)".format(item["FinalPrice"])
    else:
        price = "$(b){FinalPrice}$(b), was {OriginalPrice}".format(**item)
    parts.append("{}{}".format(CURRENCY[tld], price))

    # format the rating nicely if it exists
    reviews = item["ReviewSummary"]
    if reviews["TotalReviews"]:
        parts.append("{}/5 ({} ratings)".format(reviews["Rating"], reviews["TotalReviews"]))

    tags = []
    if not item["Instock"]:
        tags.append("$(red)Out Of Stock$(c)")
    if item["IsFreeShipping"]:
        tags.append("Free Shipping")
    if item.get("IsPremierItem"):
        tags.append("Premier")
    if item["IsFeaturedItem"]:
        tags.append("Featured")
    if additional["IsShellShockerItem"]:
        tags.append("$(b)SHELL SHOCKER$(b)")
    if tags:
        parts.append(", ".join(tags))

    if show_url:
        parts.append("[h3]{}[/h3]".format(ITEM_URL.format(tld, item["NeweggItemNumber"])))

    return start + " [div] ".join(parts)
def format_output(item, show_url=False):
    """Take a reddit post dict and return a formatted string."""
    item["title"] = html.unescape(formatting.truncate(item["title"], 200))
    item["link"] = short_url.format(item["id"])

    # Fix some URLs
    if not item["is_self"] and item["url"]:
        url = item["url"]
        # Use .gifv links for imgur
        if "imgur.com/" in url and url.endswith(".gif"):
            url += "v"
        # Fix i.reddituploads.com crap ("&" in URL)
        if "i.reddituploads.com/" in url:
            # Get i.redditmedia.com preview (first one is full size)
            url = item["preview"]["images"][0]["source"]["url"]
        # Unescape since reddit gives links for HTML
        item["url"] = html.unescape(url)

    raw_time = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timeformat.time_since(raw_time, count=1, simple=True)
    item["comments"] = formatting.pluralize_auto(item["num_comments"], 'comment').replace(",", "")
    item["points"] = formatting.pluralize_auto(item["score"], 'point').replace(",", "")

    pieces = []
    if show_url and item["link"]:
        pieces.append("[h3]{link}[/h3]")
    pieces.append("{title}")
    if not item["is_self"]:
        pieces.append("{url}")
    if item["over_18"]:
        pieces.append("$(red)NSFW$(c)")

    pieces += ["/r/{subreddit}", "/u/{author}", "{timesince} ago", "{points}", "{comments}"]

    if item["gilded"]:
        item["gilded"] = formatting.pluralize_auto(item["gilded"], 'gild')
        pieces.append("$(yellow){gilded}$(c)")

    return "[h1]Reddit:[/h1] " + " [div] ".join(pieces).format(**item)
def wolframalpha(text, bot):
    """<query> - Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get_api_key("wolframalpha")
    if not api_key:
        return "This command requires a Wolfram Alpha API key."

    try:
        params = {'input': text, 'appid': api_key}
        data = http.get_xml(api_url, params=params)
    except Exception:
        # was a bare "except:", which also swallows KeyboardInterrupt and
        # SystemExit; Exception keeps the best-effort behaviour intact
        return "WolframAlpha API error, please try again in a few minutes."

    pod_texts = []
    for pod in data.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            # collapse literal "\n" markers and runs of whitespace
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)

    # unescape backslash-escaped characters in the API output
    ret = re.sub(r'\\(.)', r'\1', ret)
    ret = formatting.truncate(ret, 250)

    if not ret:
        return 'No results.'

    return ret
def snopes(text):
    """snopes <topic> -- Searches snopes for an urban legend about <topic>."""
    try:
        params = {'sp_q': text, 'sp_c': "1"}
        request = requests.get(search_url, params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    search_page = html.fromstring(request.text)
    result_urls = search_page.xpath("//a[@target='_self']/@href")

    if not result_urls:
        return "No matching pages found."

    try:
        _request = requests.get(result_urls[0])
        _request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error finding results: {}".format(e)

    snopes_page = html.fromstring(_request.text)
    snopes_text = snopes_page.text_content()

    # guard against pages without a "Claim:" line — calling .group(0) on a
    # failed search raised AttributeError before
    claim_match = re.search(r"Claim: .*", snopes_text)
    if claim_match is None:
        return "No matching pages found."
    claim = claim_match.group(0).strip()

    status = re.search(r"Status: .*", snopes_text)
    if status is not None:
        status = status.group(0).strip()
    else:
        # new-style statuses; also guard against a missing verdict
        status_match = re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED", snopes_text)
        if status_match is None:
            return "No matching pages found."
        status = "Status: {}".format(status_match.group(0).title())

    status = " ".join(status.split())  # compress whitespace
    claim = formatting.truncate(" ".join(claim.split()), 150)
    url = web.try_shorten(result_urls[0])
    return '"{}" {} - {}'.format(claim, status, url)
def wiki(text, reply):
    """<phrase> - Gets first sentence of Wikipedia article on <phrase>."""
    try:
        request = requests.get(search_url, params={'search': text.strip()})
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        reply("Could not get Wikipedia page: {}".format(e))
        raise

    x = etree.fromstring(request.text, parser=parser)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        error = x.find('error')
        if error is not None:
            return 'Could not get Wikipedia page: %(code)s: %(info)s' % error.attrib
        return 'No results found.'

    def extract(item):
        return [item.find(ns + tag).text for tag in ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    # disambiguation page: fall back to the next entry
    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = ' '.join(desc.split())  # remove excess spaces
    desc = formatting.truncate(desc, 200)

    return '{} :: {}'.format(desc, requests.utils.quote(url, ':/%'))
def format_output(item, show_url=False):
    """Take a reddit post dict and return a formatted string."""
    item["title"] = formatting.truncate(item["title"], 70)
    item["link"] = short_url.format(item["id"])

    posted_at = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timeformat.time_since(posted_at, count=1, simple=True)

    item["comments"] = formatting.pluralize(item["num_comments"], 'comment')
    item["points"] = formatting.pluralize(item["score"], 'point')

    # tag adult posts
    item["warning"] = " \x02NSFW\x02" if item["over_18"] else ""

    if show_url:
        template = ("\x02{title} : {subreddit}\x02 - {comments}, {points}"
                    " - \x02{author}\x02 {timesince} ago - {link}{warning}")
    else:
        template = ("\x02{title} : {subreddit}\x02 - {comments}, {points}"
                    " - \x02{author}\x02, {timesince} ago{warning}")
    return template.format(**item)
def suggest(text):
    """suggest <term> -- Retrieves a list of suggested terms for a google search"""
    params = {"output": "json", "client": "hp", "q": text}

    try:
        request = requests.get("http://google.com/complete/search", params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get suggestions: {}".format(e)

    # the response is JSONP: strip the callback wrapper to get the payload
    payload = request.text.split("(", 1)[1][:-1]
    raw_suggestions = json.loads(payload)[1]
    suggestions = [entry[0] for entry in raw_suggestions]

    if not suggestions:
        return "No suggestions found."

    return formatting.truncate(formatting.strip_html(", ".join(suggestions)), 200)
def wiki(text):
    """wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."""
    try:
        request = requests.get(search_url, params={'search': text.strip()})
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get Wikipedia page: {}".format(e)

    x = etree.fromstring(request.text, parser=parser)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        error = x.find('error')
        if error is not None:
            return 'Could not get Wikipedia page: %(code)s: %(info)s' % error.attrib
        return 'No results found.'

    def extract(item):
        return [item.find(ns + field).text for field in ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    # disambiguation page -> use the next entry instead
    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    # remove excess spaces, then cap the length
    desc = formatting.truncate(' '.join(desc.split()), 200)

    return '{} :: {}'.format(desc, requests.utils.quote(url, ':/%'))
def wiki(text):
    """wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."""
    try:
        request = requests.get(search_url, params={"search": text.strip()})
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get Wikipedia page: {}".format(e)

    root = etree.fromstring(request.text, parser=parser)

    ns = "{http://opensearch.org/searchsuggest2}"
    items = root.findall(ns + "Section/" + ns + "Item")

    if not items:
        error = root.find("error")
        if error is not None:
            return "Could not get Wikipedia page: %(code)s: %(info)s" % error.attrib
        return "No results found."

    def extract(item):
        return [item.find(ns + part).text for part in ("Text", "Description", "Url")]

    title, desc, url = extract(items[0])

    # disambiguation page: take the next concrete entry
    if "may refer to" in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub("", title)

    if title.lower() not in desc.lower():
        desc = title + desc

    # remove excess spaces, then cap the length
    desc = formatting.truncate(" ".join(desc.split()), 200)

    return "{} :: {}".format(desc, requests.utils.quote(url, ":/"))
def suggest(text):
    """suggest <phrase> -- Gets suggested phrases for a google search"""
    params = {'output': 'json', 'client': 'hp', 'q': text}

    try:
        request = requests.get('http://google.com/complete/search', params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Could not get suggestions: {}".format(e)

    # strip the JSONP callback wrapper to get at the JSON payload
    payload = request.text.split('(', 1)[1][:-1]
    entries = json.loads(payload)[1]
    suggestions = [entry[0] for entry in entries]

    if not suggestions:
        return 'No suggestions found.'

    joined = formatting.strip_html(", ".join(suggestions))
    return formatting.truncate(joined, 200)
def urban(text):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""
    headers = {"Referer": "http://m.urbandictionary.com"}

    if text:
        # clean and split the input
        text = text.lower().strip()
        parts = text.split()

        # a trailing number selects a specific definition by 1-based index
        if parts[-1].isdigit():
            id_num = int(parts.pop())
            # rebuild the query without the trailing ID
            text = " ".join(parts)
        else:
            id_num = 1

        # fetch the definitions
        try:
            request = requests.get(define_url, params={"term": text}, headers=headers)
            request.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
            return "Could not get definition: {}".format(e)

        data = request.json()

        if data['result_type'] == 'no_results':
            return 'Not found.'
    else:
        # no query: get a random definition!
        try:
            request = requests.get(random_url, headers=headers)
            request.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
            return "Could not get definition: {}".format(e)

        data = request.json()
        id_num = None

    entries = data['list']

    if id_num:
        # try getting the requested definition
        try:
            entry = entries[id_num - 1]
            body = " ".join(entry['definition'].split())  # remove excess spaces
            body = formatting.truncate(body, 200)
        except IndexError:
            return 'Not found.'

        url = entry['permalink']
        output = "[{}/{}] {} - {}".format(id_num, len(entries), body, url)
    else:
        entry = random.choice(entries)

        body = " ".join(entry['definition'].split())  # remove excess spaces
        body = formatting.truncate(body, 200)

        url = entry['permalink']
        output = "\x02{}\x02: {} - {}".format(entry['word'], body, url)

    return output
def amazon(text, _parsed=False):
    """<query> -- Searches Amazon for query"""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Referer': 'http://www.amazon.com/'
    }
    params = {
        'url': 'search-alias',
        'field-keywords': text.strip()
    }

    # link-parser invocations pass a region in _parsed; otherwise use the
    # configured default region
    region = _parsed if _parsed else REGION
    request = requests.get(SEARCH_URL.format(region), params=params, headers=headers)

    soup = BeautifulSoup(request.text)

    # check if there are any results on the amazon page
    results = soup.find('div', {'id': 'atfResults'})
    if not results:
        if _parsed:
            return
        return "No results found."

    # get the first item from the results on the amazon page
    results = results.find('ul', {'id': 's-results-list-atf'}).find_all('li', {'class': 's-result-item'})
    item = results[0]
    asin = item['data-asin']

    # here we use dirty html scraping to get everything we need
    title = formatting.truncate(item.find('h2', {'class': 's-access-title'}).text, 60)

    tags = []

    # tags!
    if item.find('i', {'class': 'a-icon-prime'}):
        tags.append("$(b)Prime$(b)")
    if item.find('i', {'class': 'sx-bestseller-badge-primary'}):
        tags.append("$(b)Bestseller$(b)")

    # we use regex because we need to recognise text for this part
    # the other parts detect based on html tags, not text
    if re.search(r"(Kostenlose Lieferung|Livraison gratuite|FREE Shipping|Envío GRATIS"
                 r"|Spedizione gratuita)", item.text, re.I):
        tags.append("$(b)Free Shipping$(b)")

    price = item.find('span', {'class': ['s-price', 'a-color-price']}).text

    # use a whole lot of BS4 and regex to get the ratings
    try:
        # get the rating
        rating = item.find('i', {'class': 'a-icon-star'}).find('span', {'class': 'a-icon-alt'}).text
        rating = re.search(r"([0-9]+(?:(?:\.|,)[0-9])?).*5", rating).group(1).replace(",", ".")
        # get the rating count
        pattern = re.compile(r'(product-reviews|#customerReviews)')
        num_ratings = item.find('a', {'href': pattern}).text.replace(".", ",")
        # format the rating and count into a nice string
        rating_str = "{}/5 stars ({} ratings)".format(rating, num_ratings)
    except AttributeError:
        rating_str = "No Ratings"

    # generate a short url
    if AFFILIATE_TAG:
        url = "http://www.amazon.com/dp/" + asin + "/?tag=" + AFFILIATE_TAG
    else:
        url = "http://www.amazon.com/dp/" + asin + "/"
    url = web.try_shorten(url)

    # join all the tags into a string
    tag_str = " - " + ", ".join(tags) if tags else ""

    # finally, assemble everything into the final string, and return it!
    if _parsed:
        return colors.parse("$(b){}$(b) ({}) - {}{}".format(title, price, rating_str, tag_str))
    return colors.parse("$(b){}$(b) ({}) - {}{} - {}".format(title, price, rating_str, tag_str, url))