def soundcloud(url, api_key):
    """Return a one-line IRC summary for the SoundCloud track at `url`."""
    # Resolve the public track URL into its metadata via the SoundCloud API.
    track = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))
    if track['description']:
        desc = u": {} ".format(text.truncate_str(track['description'], 50))
    else:
        desc = ""
    if track['genre']:
        genre = u"- Genre: \x02{}\x02 ".format(track['genre'])
    else:
        genre = ""
    duration = ""
    if track['duration']:
        # duration is in milliseconds; convert to minutes and truncate the
        # string form to two decimal places via the regex capture
        minutes = re.match('(.*\...)', str(float(track['duration']) / 60000)).group(1)
        if minutes:
            duration = u" {} mins -".format(minutes)
    # shorten the canonical permalink for the channel output
    url = web.try_isgd(track['permalink_url'])
    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}-{} {} plays, {} downloads, {} comments - {}".format(
        track['title'], track['user']['username'], desc, genre, duration,
        track['playback_count'], track['download_count'], track['comment_count'], url)
def fact(inp, say=False, nick=False):
    """fact -- Gets a random fact from OMGFACTS.

    The inp/say/nick parameters are part of the bot-hook signature and are
    not used by this command.
    """
    attempts = 0
    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except (http.HTTPError, http.URLError):
            # narrowed from a bare except: only retry on network/HTTP failures
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue
        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        # renamed from `fact` so the local no longer shadows this function
        fact_text = ''.join(response.find(text=True))
        if fact_text:
            fact_text = fact_text.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue
    url = web.try_isgd(link)
    return "{} - {}".format(fact_text, url)
def format_output(data):
    """Build two formatted IRC lines describing a BukkitDev plugin record."""
    name = data["plugin_name"]
    description = text.truncate_str(data['description'], 30)
    url = data['website']
    # Insert a zero-width space after the first character so IRC clients
    # don't ping the author when the line is echoed into a channel.
    first_author = data['authors'][0]
    authors = first_author[0] + u"\u200b" + first_author[1:]
    stage = data['stage']
    current_version = data['versions'][0]
    last_update = time.strftime('%d %B %Y %H:%M', time.gmtime(current_version['date']))
    version_number = current_version['version']
    bukkit_versions = ", ".join(current_version['game_versions'])
    link = web.try_isgd(current_version['link'])
    if description:
        line_a = u"\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
    else:
        line_a = u"\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
    line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(
        version_number, bukkit_versions, last_update, link)
    return line_a, line_b
def soundcloudData(url, api_key):
    """Return a short u"title by artist duration" summary for a SoundCloud track.

    Unlike the full soundcloud() formatter, only the title, uploader and
    track length appear in the output string.
    """
    data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))
    duration = ""
    if data['duration']:
        # milliseconds -> minutes, truncated to two decimal places
        tracklength = float(data['duration']) / 60000
        tracklength = re.match('(.*\...)', str(tracklength)).group(1)
        if tracklength:
            duration = u"{} mins".format(tracklength)
    # NOTE(review): the original also built desc/genre strings and shortened
    # the permalink via is.gd, but never used any of them in the return value;
    # that dead work (including the needless is.gd network call) is removed.
    return u"\x02{}\x02 by \x02{}\x02 {}".format(data['title'], data['user']['username'], duration)
def sptfy(inp, sptfy=False):
    """Shorten a URL: via sptfy.com when `sptfy` is set, otherwise via is.gd."""
    if sptfy:
        shortenurl = "http://sptfy.com/index.php"
        data = urlencode({
            'longUrl': inp,
            'shortUrlDomain': 1,
            'submitted': 1,
            "shortUrlFolder": 6,
            "customUrl": "",
            "shortUrlPassword": "",
            "shortUrlExpiryDate": "",
            "shortUrlUses": 0,
            "shortUrlType": 0
        })
        try:
            soup = http.get_soup(shortenurl, post_data=data, cookies=True)
        except (http.HTTPError, http.URLError):
            # narrowed from a bare except: on network failure just return the
            # URL unshortened rather than crashing the hook
            return inp
        try:
            link = soup.find('div', {'class': 'resultLink'}).text.strip()
            return link
        except AttributeError:
            # narrowed from a bare except: find() returned None, so no result
            # div exists - scrape and report the error sptfy.com displayed
            message = "Unable to shorten URL: {}".format(
                soup.find('div', {
                    'class': 'messagebox_text'
                }).find('p').text.split("<br/>")[0])
            return message
    else:
        return web.try_isgd(inp)
def format_output(data):
    """Return (summary_line, release_line) for a BukkitDev plugin record."""
    current_version = data['versions'][0]
    name = data["plugin_name"]
    description = formatting.truncate_str(data['description'], 30)
    url = data['website']
    # zero-width space after the first letter stops IRC nick highlights
    authors = data['authors'][0]
    authors = "{}\u200b{}".format(authors[0], authors[1:])
    stage = data['stage']
    last_update = time.strftime('%d %B %Y %H:%M', time.gmtime(current_version['date']))
    version_number = current_version['version']
    bukkit_versions = ", ".join(current_version['game_versions'])
    link = web.try_isgd(current_version['link'])
    if description:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(
        version_number, bukkit_versions, last_update, link)
    return line_a, line_b
def format_artist(item):
    """Format a Spotify artist API item as "name - short_url" for IRC.

    Returns an error string when the item's URI cannot be split into a
    type and an id.
    """
    try:
        # Renamed from `type`/`id` to stop shadowing the builtins. Also catch
        # ValueError: unpacking a wrong-sized list raises ValueError, not
        # IndexError, so malformed URIs used to escape this guard entirely.
        uri_type, uri_id = item["uri"].split(":")[1:]
    except (IndexError, ValueError):
        return "Could not find artist."
    url = web.try_isgd(gateway.format(uri_type, uri_id))
    return u"\x02{}\x02 - \x02{}\x02".format(item["name"], url)
def fact():
    """fact -- Gets a random fact from OMGFACTS."""
    attempts = 0
    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup("http://www.omg-facts.com/random")
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            attempts += 1
            continue
        surprise = soup.find("a", {"class": "surprise"})
        link = surprise["href"]
        fact_data = "".join(surprise.find(text=True))
        if not fact_data:
            if attempts > 2:
                return "Could not find a fact!"
            attempts += 1
            continue
        fact_data = fact_data.strip()
        break
    url = web.try_isgd(link)
    return "{} - {}".format(fact_data, url)
def format_album(item):
    """Format a Spotify album item as "name by artists - short_url" for IRC.

    Fetches the full album record for the extra fields; returns an error
    string on any failure (bad URI, HTTP error, malformed response).
    """
    try:
        # renamed from `type`/`id` to avoid shadowing the builtins
        uri_type, uri_id = item["uri"].split(":")[1:]
        more_data = http.get_json(
            "https://api.spotify.com/v1/albums/{}".format(uri_id))
        item.update(more_data)
    except Exception as e:
        # deliberately broad: any failure is reported back to the channel
        return "Could not get album: {}".format(e)
    url = web.try_isgd(gateway.format(uri_type, uri_id))
    return u"\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(
        item["name"], ",".join(a["name"] for a in item["artists"]), url)
def qrcode(inp):
    """qrcode [link] returns a link for a QR code."""
    params = {
        "cht": "qr",       # chart type (QR)
        "chs": "200x200",  # dimensions
        "chl": inp,        # data
    }
    chart_url = http.prepare_url("http://chart.googleapis.com/chart", params)
    return web.try_isgd(chart_url)
def format_track(item):
    """Format a Spotify track item as "name by artists [from album] - url"."""
    try:
        # Renamed from `type`/`id` to stop shadowing the builtins. Also catch
        # ValueError: unpacking a wrong-sized list raises ValueError, not
        # IndexError, so malformed URIs used to escape this guard entirely.
        uri_type, uri_id = item["uri"].split(":")[1:]
    except (IndexError, ValueError):
        return "Could not find track."
    url = web.try_isgd(gateway.format(uri_type, uri_id))
    out = u"\x02{}\x02 by \x02{}\x02".format(
        item["name"], ",".join(a["name"] for a in item["artists"]))
    # album name is optional in the API response
    if "name" in item["album"]:
        out += u" from \x02{}\x02".format(item["album"]["name"])
    out += u" - \x02{}\x02".format(url)
    return out
def answer(inp):
    """answer <query> -- find the answer to a question on Yahoo! Answers"""
    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": inp.strip()}).one()
    short_url = web.try_isgd(result["Link"])
    # we split the answer and .join() it to remove newlines/extra spaces;
    # local renamed from `answer` so it no longer shadows this function
    answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)
def wolframalpha(inp, bot=None):
    """wa <query> -- Computes <query> using Wolfram Alpha."""
    # API key lives in the bot config; bail out early when it is absent.
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: missing api key"
    url = 'http://api.wolframalpha.com/v2/query?format=plaintext'
    result = http.get_xml(url, input=inp, appid=api_key)
    # get the URL for a user to view this query in a browser
    query_url = "http://www.wolframalpha.com/input/?i=" + \
        http.quote_plus(inp.encode('utf-8'))
    short_url = web.try_isgd(query_url)
    pod_texts = []
    # Walk only the "primary" result pods, skipping the echo of the input.
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue
        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            # The API's plaintext embeds literal "\n" sequences; turn them
            # into "; " separators and collapse runs of whitespace.
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))
    ret = ' - '.join(pod_texts)
    if not pod_texts:
        return 'No results.'
    # Unescape backslash-escaped characters from the plaintext output.
    ret = re.sub(r'\\(.)', r'\1', ret)

    def unicode_sub(match):
        # NOTE: unichr is Python 2 only; this plugin predates Python 3.
        return unichr(int(match.group(1), 16))

    # Expand Wolfram's "\:xxxx" hex escapes into the actual characters.
    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)
    ret = text.truncate_str(ret, 250)
    if not ret:
        return 'No results.'
    return "{} - {}".format(ret, short_url)
def spotify_url(match):
    """Regex-hook handler: describe the Spotify link captured in `match`.

    Group 3 is the resource type (track/artist/album/user), group 4 the id
    (or, for user links, the remainder of the path/URI).
    """
    type = match.group(3)
    spotify_id = match.group(4)
    if type == "user":
        # For user links the id still contains "user/type/id" (or the
        # colon-separated URI form); pull the pieces apart.
        # NOTE(review): if spotify_id contains neither "/" nor ":", uname and
        # utype are never bound and the url line below raises NameError.
        if "/" in spotify_id:
            spidsplit = spotify_id.split("/")
            uname = spidsplit[-3]
            utype = spidsplit[-2]
            spotify_id = spidsplit[-1]
        elif ":" in spotify_id:
            spidsplit = spotify_id.split(":")
            uname = spidsplit[-3]
            utype = spidsplit[-2]
            spotify_id = spidsplit[-1]
        url = "http://open.spotify.com/%s/%s/%s/%s" % (type, uname, utype, spotify_id)
        if utype == "playlist":
            # SECURITY/maintenance note: hard-coded OAuth bearer token; these
            # expire quickly and should come from config, not source.
            auth = "Bearer BQDQ4-0f_26qPXsh6nUnN1zLiKlfgttROpj3iaismSjEJmR4Xqb-f4mOUvzsyZB_ABjEtMPic4ZYMlqqtS3j7khflDjxsZfpZ4gzVYmiuG6kTsedhrIGAj9W5IrzPC8XonGUVcK3y7Ryi2C0IENsWLonI54"
            data = http.get_json(
                "https://api.spotify.com/v1/users/{}/playlists/{}".format(
                    uname, spotify_id), headers={"Authorization": auth})
            # Fetch the owner's full profile to get a display name.
            data["owner"].update(http.get_json(data["owner"]["href"]))
            return u"Spotify Playlist: \x02{}\x02 by \x02{}\x02 - {} tracks - \x02{}\x02".format(
                data["name"], data["owner"]["display_name"],
                len(data["tracks"]), web.try_isgd(url))
        else:
            # Unsupported user sub-resource: ask for a bug report.
            return u"Please msg blha303 with this: %s | %s" % (url, match.group())
    url = spuri.format(type, spotify_id)
    # Non-user resources: one API call per type, with the API's own error
    # message relayed on failure.
    if type == "track":
        data = http.get_json(
            "https://api.spotify.com/v1/tracks/{}".format(spotify_id))
        return u"Spotify Track: {}".format(
            format_track(data) if not "error" in data
            else "{status}: {message}".format(**data["error"]))
    elif type == "artist":
        data = http.get_json(
            "https://api.spotify.com/v1/artists/{}".format(spotify_id))
        return u"Spotify Artist: {}".format(
            format_artist(data) if not "error" in data
            else "{status}: {message}".format(**data["error"]))
    elif type == "album":
        data = http.get_json(
            "https://api.spotify.com/v1/albums/{}".format(spotify_id))
        return u"Spotify Album: {}".format(
            format_album(data) if not "error" in data
            else "{status}: {message}".format(**data["error"]))
def lyrics(inp):
    """lyrics <search> - Search AZLyrics.com for song lyrics"""
    # "pastelyrics" anywhere in the query asks for the full lyrics on a paste
    if "pastelyrics" in inp:
        dopaste = True
        inp = inp.replace("pastelyrics", "").strip()
    else:
        dopaste = False
    soup = http.get_soup(url + inp.replace(" ", "+"))
    if "Try to compose less restrictive search query" in soup.find(
            'div', {'id': 'inn'}).text:
        return "No results. Check spelling."
    div = None
    # first search hit whose link is an actual lyrics page
    for i in soup.findAll('div', {'class': 'sen'}):
        if "/lyrics/" in i.find('a')['href']:
            div = i
            break
    if div:
        title = div.find('a').text
        link = div.find('a')['href']
        if dopaste:
            newsoup = http.get_soup(link)
            try:
                # renamed from `lyrics` so the local no longer shadows this
                # function
                lyric_text = newsoup.find(
                    'div', {
                        'style': 'margin-left:10px;margin-right:10px;'
                    }).text.strip()
                pasteurl = " " + web.haste(lyric_text)
            except Exception as e:
                # best-effort: a paste failure degrades to an inline notice
                pasteurl = " (\x02Unable to paste lyrics\x02 [{}])".format(
                    str(e))
        else:
            pasteurl = ""
        artist = div.find('b').text.title()
        lyricsum = div.find('div').text
        # keep only the first four lines of the snippet, joined with " / "
        if "\r\n" in lyricsum.strip():
            lyricsum = " / ".join(
                lyricsum.strip().split("\r\n")[0:4])  # truncate, format
        else:
            lyricsum = " / ".join(
                lyricsum.strip().split("\n")[0:4])  # truncate, format
        return u"\x02{}\x02 by \x02{}\x02 {}{} - {}".format(
            title, artist, web.try_isgd(link), pasteurl, lyricsum[:-3])
    else:
        return "No song results. " + url + inp.replace(" ", "+")
def dinner(inp):
    """dinner - WTF IS FOR DINNER"""
    try:
        page = http.open(random_url)
    except (http.HTTPError, http.URLError) as e:
        return "Could not get recipe: {}".format(e)
    # follow the redirect to the concrete recipe URL
    url = page.geturl()
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)
    name = data["name"].strip()
    # renamed from `text`, which shadowed the `text` helper module used by
    # sibling plugins in this codebase
    phrase = random.choice(phrases).format(name)
    return u"{} - {}".format(phrase, web.try_isgd(url))
def sptfy(inp, sptfy=False): shortenurl = "http://sptfy.com/index.php" data = urlencode({'longUrl': inp, 'shortUrlDomain': 1, 'submitted': 1, "shortUrlFolder": 6, "customUrl": "", "shortUrlPassword": "", "shortUrlExpiryDate": "", "shortUrlUses": 0, "shortUrlType": 0}) try: soup = http.get_soup(shortenurl, post_data=data, cookies=True) except: return inp try: link = soup.find('div', {'class': 'resultLink'}).text.strip() return link #if we can't shorten the url explain why and use isgd instead except: message = "Unable to shorten URL: %s" % \ soup.find('div', {'class': 'messagebox_text'}).find('p').text.split("<br/>")[0] print message return web.try_isgd(inp)
def soundcloud(url, api_key):
    """Summarise the SoundCloud track at `url` as a single IRC line."""
    track = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))
    # optional fragments collapse to "" when the API field is empty
    desc = u": {} ".format(text.truncate_str(track['description'], 50)) if track['description'] else ""
    genre = u"- Genre: \x02{}\x02 ".format(track['genre']) if track['genre'] else ""
    short_url = web.try_isgd(track['permalink_url'])
    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        track['title'], track['user']['username'], desc, genre,
        track['playback_count'], track['download_count'], track['comment_count'], short_url)
def format_item(item, show_url=True):
    """ takes a newegg API item object and returns a description """
    title = text.truncate_str(item["Title"], 50)

    # format the rating nicely if it exists
    if item["ReviewSummary"]["TotalReviews"] == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(
            item["ReviewSummary"]["Rating"],
            item["ReviewSummary"]["TotalReviews"][1:-1])

    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    tags = ["\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02"]
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER®\x02")

    # join all the tags together in a comma seperated string ("tag1, tag2, tag3")
    tag_text = u", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
        return u"\x02{}\x02 ({}) - {} - {} - {}".format(
            title, price, rating, tag_text, url)
    return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)
def osrc(inp):
    """osrc <github user> -- Gets an Open Source Report Card for <github user>"""
    user_nick = inp.strip()
    try:
        response = http.get_json(api_url.format(user_nick))
    except (http.HTTPError, http.URLError):
        return "Couldn't find any stats for this user."
    response["nick"] = user_nick
    # the summary field is an HTML fragment; pull the work-time phrase out
    summary = BeautifulSoup(response["summary"])
    response["work_time"] = summary.find("a", {"href": "#day"}).contents[0]
    response["short_url"] = web.try_isgd(user_url.format(user_nick))
    return "{nick} is a {lang_user}. {nick} is a {hacker_type} " \
           "who seems to {work_time} - {short_url}".format(**response)
def format_item(item, show_url=True):
    """ takes a newegg API item object and returns a description """
    title = formatting.truncate_str(item["Title"], 50)

    # format the rating nicely if it exists
    if item["ReviewSummary"]["TotalReviews"] == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(
            item["ReviewSummary"]["Rating"],
            item["ReviewSummary"]["TotalReviews"][1:-1])

    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    tags = ["\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02"]
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = ", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
        return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating, tag_text, url)
    return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating, tag_text)
def dinner():
    """dinner - WTF IS FOR DINNER"""
    try:
        page = http.open(random_url)
    except (http.HTTPError, http.URLError) as e:
        return "Could not get recipe: {}".format(e)
    # follow the redirect to the concrete recipe URL
    url = page.geturl()
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)
    name = data["name"].strip().upper()
    # renamed from `text` to avoid shadowing the text helper module
    sentence = random.choice(phrases).format(name)
    if censor:
        # bug fix: the original replaced "F**K" with itself (a no-op), so the
        # censor flag had no effect; mask the actual expletive instead
        sentence = sentence.replace("FUCK", "F**K")
    return "{} - {}".format(sentence, web.try_isgd(url))
def osrc(inp):
    """osrc <github user> -- Gets an Open Source Report Card for <github user>"""
    user_nick = inp.strip()
    url = user_url.format(user_nick)
    try:
        soup = http.get_soup(url)
    except (http.HTTPError, http.URLError):
        return "Couldn't find any stats for this user."
    report = soup.find("div", {"id": "description"}).find("p").get_text()
    # Collapse all runs of whitespace, then slice off the trailing full stop.
    report = " ".join(report.split())[:-1]
    return "{} - {}".format(report, web.try_isgd(url))
def recipe(inp):
    """recipe [term] - Gets a recipe for [term], or gets a random recipe if [term] is not provided"""
    if inp:
        # search for the term and pick a random front-page hit
        try:
            search = http.get_soup(search_url, query=inp.strip())
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)
        result_list = search.find('div', {'class': 'found_results'})
        if not result_list:
            return "No results"
        results = result_list.find_all('div', {'class': 'recipe_result'})
        result = random.choice(results)
        url = base_url + result.find('div', {'class': 'image-wrapper'}).find('a')['href']
    else:
        # no search term given: follow the "random recipe" redirect instead
        try:
            page = http.open(random_url)
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)
        url = page.geturl()
    # use get_data() to get the recipe info from the URL
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)
    name = data["name"].strip()
    return u"Try eating \x02{}!\x02 - {}".format(name, web.try_isgd(url))
def soundcloud(url, api_key):
    """Return a one-line IRC summary for the SoundCloud track at `url`."""
    # Resolve the public track URL into metadata via the SoundCloud API.
    data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))
    desc = ""
    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))
    genre = ""
    if data['genre']:
        genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
    duration = ""
    if data['duration']:
        # duration is in milliseconds; convert to minutes and truncate the
        # string form to two decimal places via the regex capture
        tracklength = float(data['duration']) / 60000
        tracklength = re.match('(.*\...)', str(tracklength)).group(1)
        if tracklength:
            duration = u" {} mins -".format(tracklength)
    # shorten the canonical permalink for the channel output
    url = web.try_isgd(data['permalink_url'])
    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}-{} {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre, duration,
        data['playback_count'], data['download_count'], data['comment_count'], url)
def soundcloud(url, api_key):
    """Return a one-line IRC summary for the SoundCloud track at `url`."""
    data = http.get_json(api_url + '/resolve.json?' + urlencode({
        'url': url,
        'client_id': api_key
    }))
    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))
    else:
        desc = ""
    if data['genre']:
        genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
    else:
        genre = ""
    url = web.try_isgd(data['permalink_url'])
    # bug fix: the format string read "by \x02{}user\x02", which appended a
    # stray literal "user" after the username (cf. the sibling formatters,
    # which use "by \x02{}\x02")
    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre,
        data['playback_count'], data['download_count'], data['comment_count'], url)
def recipe(text):
    """recipe [term] - Gets a recipe for [term], or gets a random recipe if [term] is not provided"""
    if text:
        # search for the term and pick a random front-page hit
        try:
            search = http.get_soup(search_url, query=text.strip())
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)
        result_list = search.find('div', {'class': 'found_results'})
        if not result_list:
            return "No results"
        results = result_list.find_all('div', {'class': 'recipe_result'})
        result = random.choice(results)
        url = base_url + result.find('div', {'class': 'image-wrapper'}).find('a')['href']
    else:
        # no search term given: follow the "random recipe" redirect instead
        try:
            page = http.open(random_url)
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)
        url = page.geturl()
    # use get_data() to get the recipe info from the URL
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)
    name = data["name"].strip()
    return "Try eating \x02{}!\x02 - {}".format(name, web.try_isgd(url))
def lyrics(inp):
    """lyrics <search> - Search AZLyrics.com for song lyrics"""
    # "pastelyrics" anywhere in the query asks for the full lyrics on a paste
    if "pastelyrics" in inp:
        dopaste = True
        inp = inp.replace("pastelyrics", "").strip()
    else:
        dopaste = False
    soup = http.get_soup(url + inp.replace(" ", "+"))
    if "Try to compose less restrictive search query" in soup.find('div', {'id': 'inn'}).text:
        return "No results. Check spelling."
    div = None
    # take the first search hit whose link is an actual lyrics page
    for i in soup.findAll('div', {'class': 'sen'}):
        if "/lyrics/" in i.find('a')['href']:
            div = i
            break
    if div:
        title = div.find('a').text
        link = div.find('a')['href']
        if dopaste:
            newsoup = http.get_soup(link)
            try:
                # NOTE(review): this local shadows the lyrics() function name
                lyrics = newsoup.find('div', {'style': 'margin-left:10px;margin-right:10px;'}).text.strip()
                pasteurl = " " + web.haste(lyrics)
            except Exception as e:
                # best-effort: a paste failure degrades to an inline notice
                pasteurl = " (\x02Unable to paste lyrics\x02 [{}])".format(str(e))
        else:
            pasteurl = ""
        artist = div.find('b').text.title()
        lyricsum = div.find('div').text
        # keep only the first four lines of the snippet, joined with " / "
        if "\r\n" in lyricsum.strip():
            lyricsum = " / ".join(lyricsum.strip().split("\r\n")[0:4])  # truncate, format
        else:
            lyricsum = " / ".join(lyricsum.strip().split("\n")[0:4])  # truncate, format
        return u"\x02{}\x02 by \x02{}\x02 {}{} - {}".format(title, artist, web.try_isgd(link), pasteurl, lyricsum[:-3])
    else:
        return "No song results. " + url + inp.replace(" ", "+")
def soundcloud(url, api_key):
    """Build a single-line IRC description of the SoundCloud track at `url`."""
    query = urlencode({"url": url, "client_id": api_key})
    data = http.get_json(api_url + "/resolve.json?" + query)
    # optional fragments collapse to "" when the API field is empty
    desc = ": {} ".format(formatting.truncate_str(data["description"], 50)) if data["description"] else ""
    genre = "- Genre: \x02{}\x02 ".format(data["genre"]) if data["genre"] else ""
    url = web.try_isgd(data["permalink_url"])
    return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data["title"], data["user"]["username"], desc, genre,
        data["playback_count"], data["download_count"], data["comment_count"], url)
def weather(text, nick="", reply=None, db=None, notice=None, bot=None):
    # NOTE(review): these globals are re-read from config on every call.
    global dev_key, wunder_key
    dev_key = bot.config.get("api_keys", {}).get("google_dev_key", None)
    wunder_key = bot.config.get("api_keys", {}).get("wunderground", None)
    """weather <location> -- Gets weather data for <location>."""
    if not wunder_key:
        return "This command requires a Weather Underground API key."
    if not dev_key:
        return "This command requires a Google Developers Console API key."
    # NOTE(review): `save` is never set to False, so the `if text and save`
    # branch below re-saves the location on every non-empty query.
    save = True
    if '@' in text:
        # "weather @somenick": look up the stored location for another user.
        nick = text.split('@')[1].strip()
        loc = database.get(db,'users','location','nick',nick)
        if not loc:
            # .encode('ascii', 'ignore') is a Python 2 idiom to strip
            # non-ASCII from the nick before formatting.
            return "No location stored for {}.".format(nick.encode('ascii', 'ignore'))
        else:
            # NOTE(review): `location` is assigned here but never used.
            location = loc
        try:
            location_data = find_location(loc)
        except APIError as e:
            return e
    else:
        if not text:
            # No argument: fall back to the caller's own stored location.
            loc = database.get(db,'users','location','nick',nick)
            # use find_location to get location data from the user input
            # NOTE(review): find_location(loc) runs before the `if not loc`
            # guard, so a missing stored location reaches the API first.
            try:
                location_data = find_location(loc)
            except APIError as e:
                return e
            if not loc:
                notice(weather.__doc__)
                return
        else:
            if " save" in text:
                # "<location> save": strip the flag and persist the location.
                text = text.replace(' save','')
                database.set(db,'users','location',text,'nick',nick)
                loc = text
            try:
                location_data = find_location(text)
            except APIError as e:
                return e
            if text and save:
                database.set(db,'users','location',text,'nick',nick)
    # Query Weather Underground with the geocoded lat/lng pair.
    formatted_location = "{lat},{lng}".format(**location_data)
    url = wunder_api.format(wunder_key, formatted_location)
    response = requests.get(url).json()
    if response['response'].get('error'):
        return "{}".format(response['response']['error']['description'])
    forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0]
    forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][1]
    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place": response['current_observation']['display_location']['full'],
        "conditions": response['current_observation']['weather'],
        "temp_f": response['current_observation']['temp_f'],
        "temp_c": response['current_observation']['temp_c'],
        "humidity": response['current_observation']['relative_humidity'],
        "wind_kph": response['current_observation']['wind_kph'],
        "wind_mph": response['current_observation']['wind_mph'],
        "wind_direction": response['current_observation']['wind_dir'],
        "today_conditions": forecast_today['conditions'],
        "today_high_f": forecast_today['high']['fahrenheit'],
        "today_high_c": forecast_today['high']['celsius'],
        "today_low_f": forecast_today['low']['fahrenheit'],
        "today_low_c": forecast_today['low']['celsius'],
        "tomorrow_conditions": forecast_tomorrow['conditions'],
        "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c": forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c": forecast_tomorrow['low']['celsius']
    }
    # Get the more accurate URL if available, if not, get the generic one.
    if "?query=," in response["current_observation"]['ob_url']:
        weather_data['url'] = web.try_isgd(response["current_observation"]['forecast_url'])
    else:
        weather_data['url'] = web.try_isgd(response["current_observation"]['ob_url'])
    reply("{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
          "Wind: {wind_mph}MPH/{wind_kph}KPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
          "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
          "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
          "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(**weather_data))