Ejemplo n.º 1
0
def imdb(text):
    """imdb <movie> - gets information about <movie> from IMDb"""
    query = text.strip()

    # IMDb IDs are queried with the 'i' parameter, plain titles with 't'.
    if id_re.match(query):
        content = http.get_json("http://www.omdbapi.com/", i=query)
    else:
        content = http.get_json("http://www.omdbapi.com/", t=query)

    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'

    if content['Response'] != 'True':
        return 'Unknown error.'

    content['URL'] = 'http://www.imdb.com/title/{}'.format(content['imdbID'])

    # Assemble the reply from optional pieces, then fill in with %-mapping.
    pieces = ['\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s']
    if content['Runtime'] != 'N/A':
        pieces.append(' \x02%(Runtime)s\x02.')
    if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
        pieces.append(' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02'
                      ' votes.')
    pieces.append(' %(URL)s')
    return ''.join(pieces) % content
Ejemplo n.º 2
0
def rottentomatoes(inp, bot=None):
    """rt <title> -- gets ratings for <title> from Rotten Tomatoes"""
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return "error: no api key set"

    title = inp.strip()

    results = http.get_json(movie_search_url, q=title, apikey=api_key)
    if results['total'] == 0:
        return 'No results.'

    movie = results['movies'][0]
    title = movie['title']
    movie_id = movie['id']
    critics_score = movie['ratings']['critics_score']
    audience_score = movie['ratings']['audience_score']
    url = movie['links']['alternate']

    # -1 means the movie has no critics score yet; nothing useful to report.
    if critics_score == -1:
        return

    reviews = http.get_json(movie_reviews_url % movie_id, apikey=api_key, review_type='all')
    review_count = reviews['total']

    # BUG FIX: '/' is true division in Python 3, which made the liked/disliked
    # counts floats (e.g. "8.5 liked"); floor-divide for whole-review counts.
    fresh = critics_score * review_count // 100
    rotten = review_count - fresh

    return "{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
           "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url)
Ejemplo n.º 3
0
def youtime(text):
    """youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
    search = http.get_json(search_api_url, q=text)

    if 'error' in search:
        return 'error performing search'

    if search['data']['totalItems'] == 0:
        return 'no results found'

    video_id = search['data']['items'][0]['id']
    response = http.get_json(api_url.format(video_id))

    if response.get('error'):
        return

    data = response['data']
    if not data.get('duration'):
        return

    length = data['duration']
    views = data['viewCount']
    # Cumulative watch time across every view, in seconds.
    total = int(length * views)

    pretty_length = timeformat.format_time(length, simple=True)
    pretty_total = timeformat.format_time(total, accuracy=8)

    return ('The video \x02{}\x02 has a length of {} and has been viewed {:,} times for '
            'a total run time of {}!'.format(data['title'], pretty_length, views,
                                             pretty_total))
Ejemplo n.º 4
0
def get_series_eps(token, seriesid):
    """Fetch the episode list for *seriesid*, using the bearer *token*.

    Returns the 'data' list from the (last page of the) episodes endpoint.
    NOTE(review): only a single page is returned — presumably the caller
    only wants the most recent page; confirm against callers.
    """
    head = {'Authorization': 'Bearer ' + token}

    episodes = http.get_json(episodes_url.format(seriesid), headers=head)

    if episodes['links']['last'] > 1:
        params = {'page': episodes['links']['last']}
        # BUG FIX: the page parameters were built but never sent, so this
        # second request just repeated the first page.
        episodes = http.get_json(episodes_url.format(seriesid), headers=head,
                                 params=params)

    return episodes['data']
Ejemplo n.º 5
0
def char(text, bot):
    """finds a toon and drops general information on them"""
    classes = ["Warrior", "Paladin", "Hunter", "Rogue", "Priest", "Death Knight", "Shaman", "Mage", "Warlock", "Monk", "Druid", "Demon Hunter"]
    races = ["Human", "Orc", "Dwarf", "Night Elf", "Undead", "Tauren", "Gnome", "Troll", "Goblin", "Blood Elf", "Draenei"]
    genders = ["Male", "Female"]
    factions = ["Alliance", "Horde", "Neutral"]

    api_key = bot.config.get("api_keys", {}).get("wow", None)
    if not api_key:
        return "No api key found."

    # Expect "<name> <realm> [location]"; location defaults to "us".
    try:
        # BUG FIX: was "text.count(' ') is 2" -- identity comparison on an
        # int is unreliable; use ==.
        if text.count(' ') == 2:
            name, realm, location = text.split(" ")
        else:
            name, realm = text.split(" ")
            location = "us"
    except ValueError:
        return "You need to specify a realm."

    try:
        name = urllib.parse.quote(name)
        location = location.lower()
        if location == 'us':
            data = http.get_json("https://us.api.battle.net/wow/character/{}/{}?fields=guild&locale=en_US&apikey={}".format(realm, name, api_key))
        elif location == 'eu':
            data = http.get_json("https://eu.api.battle.net/wow/character/{}/{}?fields=guild&locale=en_US&apikey={}".format(realm, name, api_key))
        else:
            return "I didn't understand that location. Use US, or EU."
    except Exception as e:
        traceback.print_exc()
        return e

    try:
        battlegroup = data["battlegroup"]
        clas = classes[data["class"] - 1]
        gender = genders[data["gender"]]
        ach_points = data["achievementPoints"]
        level = data["level"]
        race_n = data["race"]
        # Race ids 25/26 fall outside the sequential race table; both are
        # Pandaren (faction variants).
        if race_n in (25, 26):
            race = "Pandaren"
        else:
            race = races[race_n - 1]
        faction = factions[data["faction"]]  # kept for parity; not displayed
        kills = data["totalHonorableKills"]
        name = data["name"]
        realm = data["realm"]
        # A character with no guild simply lacks the "guild" key.
        guild = data.get("guild", {}).get("name", "Forever Alone (No Guild)")
    except Exception as e:
        return "Failed to fetch data. Error: {}.".format(e)

    return "\x02{}\x02, \x02{}\x02 is a level {}, {} {} {}. They have \x02{}\x02 kills, \x02{}\x02 achievement points, and they are a part of \x02{}\x02. Their guild is \x02{}\x02.".format(name, realm, level, gender, race, clas, kills, ach_points, battlegroup, guild)
Ejemplo n.º 6
0
def get_series_info(token, seriesname):
    """Search for *seriesname* and return the full record of the best match."""
    head = {'Authorization': 'Bearer ' + token}

    search_params = {'name': http.quote(seriesname)}
    matches = http.get_json(search_url, headers=head, params=search_params)['data']

    # Prefer the first match that has a network set; otherwise fall back to
    # the first match overall.
    with_network = [m for m in matches if m['network'] is not None]
    chosen = with_network[0] if with_network else matches[0]

    return http.get_json(series_url.format(chosen['id']), headers=head)['data']
Ejemplo n.º 7
0
def trans(text, reply, event):
    """
    <language or language code> <text to translate> - Translation is
    Powered by Yandex https://translate.yandex.com
    """
    api_key = bot.config.get_api_key("yandex_translate")
    if not api_key:
        return "This command requires a Yandex Translate API key"

    parts = text.split(' ', 1)
    if len(parts) < 2:
        event.notice_doc()
        return None

    lang = parts[0].replace(':', '')
    text = parts[1]

    # Accept either a full language name or a raw code / direction.
    titled = lang.title()
    if titled in lang_dict:
        lang = lang_dict[titled]
    elif lang not in lang_dict.values() and lang not in lang_dir:
        return "Please specify a valid language, language code, to translate to. Use .langlist for more information " \
               "on language codes and valid translation directions."

    try:
        data = http.get_json(api_url + "translate",
                             params={'key': api_key, 'lang': lang,
                                     'text': text, 'options': 1})
    except Exception:
        reply("Unknown error occurred.")
        raise

    return "Translation ({}): {}".format(data['lang'], data['text'][0])
Ejemplo n.º 8
0
def get_weather(text, nick, reply, message, notice_doc, db):
    """Resolve a location (argument, or the caller's cached one) and fetch
    DarkSky forecast data for it.

    Returns (location_data, weather_data); (None, None) when no location is
    known and the help notice was sent instead.
    """
    api_key = bot.config.get_api_key("darksky")
    if not api_key:
        raise Exception("This command requires a DarkSky API key.")

    if not text:
        location = location_cache.get(nick.lower())
        if not location:
            notice_doc()
            return None, None
    else:
        location = text
        add_location(nick, location, db)

    # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed and reported as an API error.
    try:
        location_data = geocode(location)
    except Exception:
        raise Exception(
            "Google Geocoding API error, please try again in a few minutes.")

    try:
        weather_data = http.get_json(
            weather_url.format(api_key,
                               location_data['geometry']['location']['lat'],
                               location_data['geometry']['location']['lng']))
    except Exception:
        raise Exception(
            "DarkSky API error, please try again in a few minutes.")

    return location_data, weather_data
Ejemplo n.º 9
0
def xkcd_info(xkcd_id, url=False):
    """ takes an XKCD entry ID and returns a formatted string """
    data = http.get_json("http://www.xkcd.com/" + xkcd_id + "/info.0.json")
    date = "{} {} {}".format(data['day'], months[int(data['month'])], data['year'])
    # Optionally append a link back to the comic.
    link = ""
    if url:
        link = " | http://xkcd.com/" + xkcd_id.replace("/", "")
    return "xkcd: \x02{}\x02 ({}){}".format(data['title'], date, link)
Ejemplo n.º 10
0
def locate(text):
    """<location> - Finds <location> on Google Maps."""
    api_key = bot.config.get_api_key("google").get('access', None)
    if not api_key:
        return "This command requires a Google Developers Console API key."

    # Geocode the input, optionally biased toward a configured region.
    query = {"address": text, "key": api_key}
    region = bot.config.get('region_bias_cc', None)
    if region:
        query['region'] = region

    response = http.get_json(geocode_api, params=query)

    failure = check_status(response['status'])
    if failure:
        return failure

    top = response['results'][0]

    name = top['formatted_address']
    coords = top['geometry']['location']
    map_url = web.try_shorten(
        "https://google.com/maps/@{lat},{lng},16z/data=!3m1!1e3".format(**coords))

    tags = top['types']
    # Drop the generic 'political' tag unless it is the only one.
    if tags != ['political']:
        tags = [t for t in top['types'] if t != 'political']
    tag_text = ", ".join(tags).replace("_", " ")

    return "\x02{}\x02 - {} ({})".format(name, map_url, tag_text)
Ejemplo n.º 11
0
def stock(text):
    """<symbol> - Looks up stock information"""
    api_key = bot.config.get_api_key("alphavantage")
    if not api_key:
        return "This command requires an Alpha Vantage API key."

    params = {'function': 'GLOBAL_QUOTE', 'apikey': api_key, 'symbol': text}
    quote = http.get_json(url, params=params)

    if not quote.get("Global Quote"):
        return "Unknown ticker symbol '{}'".format(text)

    # Keys arrive prefixed (e.g. "05. price"): keep only the last word and
    # parse numeric strings where possible.
    quote = {k.split(' ')[-1]: tryParse(v) for k, v in quote['Global Quote'].items()}

    quote['url'] = web.try_shorten('https://finance.yahoo.com/quote/' + text)

    try:
        # IRC color: red (5) for a loss, green (3) otherwise.
        quote['color'] = "5" if float(quote['change']) < 0 else "3"

        return "{symbol} - ${price:.2f} " \
            "\x03{color}{change:+.2f} ({percent:.2f}%)\x0F " \
            "H:${high:.2f} L:${low:.2f} O:${open:.2f} " \
            "Volume:{volume} - {url}".format(**quote)
    except Exception:
        # Narrowed from a bare except so ^C/SystemExit are not swallowed.
        return "Error parsing return data, please try again later."
Ejemplo n.º 12
0
def shorten(url):
    """Shorten *url* by POSTing it to the `short_url` service.

    NOTE(review): the old docstring claimed this used the goo.gl API, but
    the request authenticates with the 'noxd' API key and posts to
    `short_url`; returns "<short_url>/<Id>" from the JSON response.
    """
    api_key = bot.config.get_api_key('noxd')
    postdata = {'api_key': api_key, 'link': url}

    request = http.get_json(short_url, data=postdata, get_method='POST')
    return "{}/{}".format(short_url, request['Id'])
Ejemplo n.º 13
0
def imdb(text):
    """<movie> [year] - Gets information about a movie from IMDb."""
    api_key = bot.config.get_api_key("omdb")
    if not api_key:
        return "This command requires an API key from omdb.com."

    # A trailing number is treated as the release year.
    year = ""
    words = text.split()
    if words and words[-1].isdigit():  # guard added: empty input no longer crashes
        text, year = ' '.join(words[:-1]), words[-1]

    # Narrowed from a bare except so ^C/SystemExit are not swallowed.
    try:
        content = http.get_json("http://www.omdbapi.com/",
                                apikey=api_key,
                                t=text,
                                y=year,
                                plot='short',
                                r='json')
    except Exception:
        return "OMDB API error, please try again in a few minutes."

    if content['Response'] == 'False':
        return content['Error']
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content

        out = '\x02{Title}\x02 ({Year}) ({Genre}): {Plot}'
        if content['Runtime'] != 'N/A':
            out += ' \x02{Runtime}\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02{imdbRating}/10\x02 with \x02{imdbVotes}\x02 votes. '
        out += web.try_shorten('{URL}'.format(**content))
        return out.format(**content)
    else:
        return "Error parsing movie information."
Ejemplo n.º 14
0
def get_sound_info(game, search):
    """Search the p2sounds API for *game* sounds matching *search*.

    Returns a single formatted "who - text listen" line, or the first line
    plus a paste link when multiple sounds match.
    """
    search = search.replace(" ", "+")
    try:
        data = http.get_json("http://p2sounds.blha303.com.au/search/%s/%s?format=json" % (game, search))
    except urllib.error.HTTPError as e:
        return "Error: " + json.loads(e.read())["error"]
    items = []
    for item in data["items"]:
        if "music" in game:
            # Music entries wrap the interesting fields in double quotes;
            # after splitting on '"', the odd indices (up to the third
            # quoted segment) are the quoted parts, joined with " / ".
            textsplit = item["text"].split('"')
            text = ""
            for i in range(len(textsplit)):
                if i % 2 != 0 and i < 6:
                    if text:
                        text += " / " + textsplit[i]
                    else:
                        text = textsplit[i]
        else:
            text = item["text"]
        # Truncate long transcripts to keep the IRC line manageable.
        items.append("{} - {} {}".format(item["who"],
                                         text if len(text) < 325 else text[:325] + "...",
                                         item["listen"]))
    if len(items) == 1:
        return items[0]
    else:
        # Multiple hits: show the first inline and paste the full list.
        return "{} (and {} others: {})".format(items[0], len(items) - 1, web.paste("\n".join(items)))
Ejemplo n.º 15
0
def twitch_lookup(location):
    """Resolve a twitch.tv location string to a formatted status line.

    *location* is either "channel" or "channel/type/id", where type should
    be "b" (broadcast, scraped from the page) or "c" (video, via the API).
    """
    locsplit = location.split("/")
    if len(locsplit) > 1 and len(locsplit) == 3:
        channel = locsplit[0]
        _type = locsplit[1]  # should be b or c
        _id = locsplit[2]
    else:
        channel = locsplit[0]
        _type = None
        _id = None
    fmt = "{}: {} playing {} ({})"  # Title: nickname playing Game (x views)
    if _type and _id:
        if _type == "b":  # I haven't found an API to retrieve broadcast info
            # Scrape the page directly since no broadcast API is available.
            soup = http.get_soup("http://twitch.tv/" + location)
            title = soup.find('span', {'class': 'real_title js-title'}).text
            playing = soup.find('a', {'class': 'game js-game'}).text
            views = soup.find('span', {'id': 'views-count'}).text + " view"
            # Pluralize unless the text begins with exactly "1 ".
            views = views + "s" if not views[0:2] == "1 " else views
            return html.unescape(fmt.format(title, channel, playing, views))
        elif _type == "c":
            data = http.get_json("https://api.twitch.tv/kraken/videos/" +
                                 _type + _id)
            title = data['title']
            playing = data['game']
            views = str(data['views']) + " view"
            views = views + "s" if not views[0:2] == "1 " else views
            return html.unescape(fmt.format(title, channel, playing, views))
        # NOTE(review): any other type falls through and returns None.
    else:
        # Bare channel: check for a live stream first.
        data = http.get_json("https://api.twitch.tv/kraken/streams?channel=" +
                             channel)
        if data["streams"]:
            title = data["streams"][0]["channel"]["status"]
            playing = data["streams"][0]["game"]
            v = data["streams"][0]["viewers"]
            viewers = "\x033\x02Online now!\x02\x0f " + str(v) + " viewer" + (
                "s" if v != 1 else "")
            return html.unescape(fmt.format(title, channel, playing, viewers))
        else:
            # Offline: fall back to the channel's static metadata.
            try:
                data = http.get_json("https://api.twitch.tv/kraken/channels/" +
                                     channel)
            except Exception:
                return "Unable to get channel data. Maybe channel is on justin.tv instead of twitch.tv?"
            title = data['status']
            playing = data['game']
            viewers = "\x034\x02Offline\x02\x0f"
            return html.unescape(fmt.format(title, channel, playing, viewers))
Ejemplo n.º 16
0
def twitch_lookup(location):
    """Resolve a twitch.tv/justin.tv location string to a status line.

    *location* is "channel" or "channel/type/id" (type "b" = broadcast,
    scraped; "c" = video, via the kraken API).
    """
    # Local import: HTMLParser.unescape() was removed in Python 3.9; the
    # stdlib replacement is html.unescape.
    import html

    locsplit = location.split("/")
    if len(locsplit) > 1 and len(locsplit) == 3:
        # Renamed from `type`/`id`, which shadowed builtins.
        channel, kind, vid = locsplit
    else:
        channel = locsplit[0]
        kind = None
        vid = None
    fmt = "{}: {} playing {} ({})"  # Title: nickname playing Game (x views)
    if kind and vid:
        if kind == "b":  # I haven't found an API to retrieve broadcast info
            soup = http.get_soup("http://twitch.tv/" + location)
            title = soup.find('span', {'class': 'real_title js-title'}).text
            playing = soup.find('a', {'class': 'game js-game'}).text
            views = soup.find('span', {'id': 'views-count'}).text + " view"
            # Pluralize unless the text begins with exactly "1 ".
            views = views + "s" if not views[0:2] == "1 " else views
            return html.unescape(fmt.format(title, channel, playing, views))
        elif kind == "c":
            data = http.get_json("https://api.twitch.tv/kraken/videos/" + kind + vid)
            title = data['title']
            playing = data['game']
            views = str(data['views']) + " view"
            views = views + "s" if not views[0:2] == "1 " else views
            return html.unescape(fmt.format(title, channel, playing, views))
    else:
        data = http.get_json("http://api.justin.tv/api/stream/list.json?channel=" + channel)
        if data and len(data) >= 1:
            data = data[0]
            title = data['title']
            playing = data['meta_game']
            viewers = "\x033\x02Online now!\x02\x0f " + str(data["channel_count"]) + " viewer"
            # Debug prints removed; pluralize unless exactly one viewer.
            viewers = viewers + "s" if " 1 view" not in viewers else viewers
            return html.unescape(fmt.format(title, channel, playing, viewers))
        else:
            # Narrowed from a bare except.
            try:
                data = http.get_json("https://api.twitch.tv/kraken/channels/" + channel)
            except Exception:
                return
            title = data['status']
            playing = data['game']
            viewers = "\x034\x02Offline\x02\x0f"
            return html.unescape(fmt.format(title, channel, playing, viewers))
Ejemplo n.º 17
0
def format_game(app_id, show_url=True):
    """
    Takes a Steam Store app ID and returns a formatted string with data about that app ID
    :type app_id: string
    :return: string
    """
    try:
        data = http.get_json(API_URL, params={'appids': app_id}, timeout=15)
    except Exception as e:
        return f"Could not get game info: {e}"

    game = data[app_id]["data"]

    # Name first, then a short cleaned-up description.
    parts = ["\x02{}\x02".format(game["name"])]
    summary = " ".join(formatting.strip_html(game["about_the_game"]).split())
    parts.append(formatting.truncate(summary, 75))

    # Genres (some apps have none).
    try:
        parts.append("\x02{}\x02".format(
            ", ".join(g['description'] for g in game["genres"])))
    except KeyError:
        pass

    # Release date — upcoming vs already out.
    release = game['release_date']
    verb = "coming" if release['coming_soon'] else "released"
    parts.append("{} \x02{}\x02".format(verb, release['date']))

    # Pricing. Steam reports integer cents; divmod converts to dollars.
    price = game.get("price_overview")
    if game['is_free']:
        parts.append("\x02free\x02")
    elif price:
        if price['final'] == price['initial']:
            parts.append("\x02$%d.%02d\x02" % divmod(price['final'], 100))
        else:
            now = "$%d.%02d" % divmod(price['final'], 100)
            was = "$%d.%02d" % divmod(price['initial'], 100)
            parts.append("\x02{}\x02 (was \x02{}\x02)".format(now, was))
    # else: unpriced (probably unreleased) — nothing to append.

    if show_url:
        parts.append(web.try_shorten(STORE_URL.format(game['steam_appid'])))

    return " - ".join(parts)
Ejemplo n.º 18
0
async def cnam(text):
    """<10 digit number> - Get CNAM information for a number via EveryoneAPI."""
    url = 'https://api.everyoneapi.com/v1/phone/{}'.format(text)
    api_key = bot.config.get_api_key('everyoneapi')

    try:
        data = http.get_json(url,
                             account_sid=api_key["account_sid"],
                             auth_token=api_key["auth_token"],
                             data="name,carrier,location,linetype,cnam")
    except Exception:
        # The bound exception was unused; dropped.
        return "EveryoneAPI error, please try again in a few minutes."

    info = data['data']
    out = [u"Caller ID info for {number}".format(**data)]

    # Name: prefer the expanded first/last name, fall back to CNAM.
    if info.get('expanded_name') or info.get('cnam'):
        if info.get('expanded_name'):
            expanded = info.get('expanded_name', {})
            # `is not None` replaces the `!= None` comparisons.
            names = [x for x in (expanded.get('first'), expanded.get('last'))
                     if x is not None]
            if names:
                out.append(u"Name: {}".format(' '.join(names).strip()))
        elif info['cnam'].lower() != 'unknown':
            out.append(u"Name: {}".format(info['cnam']).strip())

    # Line type.
    if data.get('type') or info.get('linetype'):
        types = [x for x in (data.get('type', 'unknown'),
                             info.get('linetype', 'unknown'))
                 if x != 'unknown']
        if types:
            out.append(u"Type: {}".format(', '.join(types)))

    # Location.
    loc = info.get('location', {})
    if loc.get('city') or loc.get('state'):
        locs = [x for x in (loc.get('city'), loc.get('state'))
                if x is not None]
        if locs:
            out.append(u"Location: {}".format(', '.join(locs)))

    # Carrier.
    if info.get('carrier', {}).get('name'):
        out.append(u"Carrier: {}".format(info['carrier']['name']))

    if len(out) > 1:
        return u"; ".join(out)
    return "No caller ID info for {}".format(text)
Ejemplo n.º 19
0
def spotify(text):
    """[-track|-artist|-album] <search term> - Search for specified media via Spotify; defaults to track."""
    api_key = bot.config.get_api_key("spotify")
    if not api_key:
        return "This command requires a Spotify API key."

    words = text.split(" ")
    if len(words) > 1 and words[0] in ["-track", "-artist", "-album"]:
        kind, query = words.pop(0)[1:], " ".join(words)
    else:
        kind, query = "track", " ".join(words)

    # Client-credentials flow: exchange the key pair for a bearer token.
    try:
        access_token = http.get_json("https://accounts.spotify.com/api/token",
                                     auth=True,
                                     auth_keys=api_key,
                                     get_method="POST",
                                     data={"grant_type": "client_credentials"})["access_token"]
    except Exception as e:
        return f"Could not get access token: {e}"

    try:
        data = http.get_json(
            "https://api.spotify.com/v1/search/",
            type=kind,
            q=query,
            limit=1,
            headers={"Authorization": "Bearer " + access_token})
    except Exception as e:
        return f"Could not get {kind} information: {e}"

    items = data[kind + "s"]["items"]
    try:
        # Renamed from `type`/`id`, which shadowed builtins.
        media_type, media_id = items[0]["uri"].split(":")[1:]
    except IndexError:
        return f"Could not find {kind}."
    url = sptfy(gateway.format(media_type, media_id))

    if kind == "track":
        return "\x02{}\x02 by \x02{}\x02 - {}".format(
            items[0]["name"], items[0]["artists"][0]["name"], url)
    return "\x02{}\x02 - {}".format(items[0]["name"], url)
Ejemplo n.º 20
0
def goog_trans(api_key, text, slang, tlang):
    """Translate *text* to *tlang* with the Google Translate v2 API.

    *slang* (source language) may be falsy, in which case the API
    auto-detects the source and the result is prefixed with it.
    """
    url = 'https://www.googleapis.com/language/translate/v2'

    if len(text) > max_length:
        return "This command only supports input of less then 100 characters."

    # Single request with an optional source language (the two near-identical
    # calls and the dead commented-out status check were removed).
    kwargs = {'key': api_key, 'q': text, 'target': tlang, 'format': "text"}
    if slang:
        kwargs['source'] = slang
    parsed = http.get_json(url, **kwargs)

    translation = parsed['data']['translations'][0]
    if not slang:
        # Auto-detected source: include it in the output.
        return unescape('(%(detectedSourceLanguage)s) %(translatedText)s' %
                        translation)
    return unescape('%(translatedText)s' % translation)
Ejemplo n.º 21
0
def get_video_description(video_id):
    """Build an IRC-formatted one-line summary for a YouTube video id.

    Returns None on API error, or a partial line if duration is missing.
    """
    request = http.get_json(api_url.format(video_id))

    if request.get('error'):
        return

    data = request['data']

    out = '\x02{}\x02'.format(data['title'])

    if not data.get('duration'):
        return out

    length = data['duration']
    out += ' - length \x02{}\x02'.format(timeformat.format_time(length, simple=True))

    if 'ratingCount' in data:
        # ratingCount is total ratings; likes are reported separately.
        likes = plural(int(data['likeCount']), "like")
        dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")

        percent = 100 * float(data['likeCount']) / float(data['ratingCount'])
        out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
                                                    dislikes, percent)

    if 'viewCount' in data:
        views = data['viewCount']
        out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])

    # Narrowed from a bare except: fall back to the raw uploader id when the
    # user lookup fails for any reason.
    try:
        uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][
            "$t"]
    except Exception:
        uploader = data["uploader"]

    upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
    out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
                                                time.strftime("%Y.%m.%d", upload_time))

    if 'contentRating' in data:
        out += ' - \x034NSFW\x02'

    return out
Ejemplo n.º 22
0
def twitch_lookup(location):
    """Return a formatted status line for a twitch.tv location.

    *location* is "channel" or "channel/type/id"; type "b" (broadcast) is
    scraped from the site, type "c" (video) uses the kraken API.
    """
    locsplit = location.split("/")
    if len(locsplit) > 1 and len(locsplit) == 3:
        channel = locsplit[0]
        _type = locsplit[1]  # should be b or c
        _id = locsplit[2]
    else:
        channel = locsplit[0]
        _type = None
        _id = None
    fmt = "{}: {} playing {} ({})"  # Title: nickname playing Game (x views)
    if _type and _id:
        if _type == "b":  # I haven't found an API to retrieve broadcast info
            # No broadcast API available, so scrape the page markup.
            soup = http.get_soup("http://twitch.tv/" + location)
            title = soup.find("span", {"class": "real_title js-title"}).text
            playing = soup.find("a", {"class": "game js-game"}).text
            views = soup.find("span", {"id": "views-count"}).text + " view"
            # Pluralize unless the count text starts with exactly "1 ".
            views = views + "s" if not views[0:2] == "1 " else views
            return html.unescape(fmt.format(title, channel, playing, views))
        elif _type == "c":
            data = http.get_json("https://api.twitch.tv/kraken/videos/" + _type + _id)
            title = data["title"]
            playing = data["game"]
            views = str(data["views"]) + " view"
            views = views + "s" if not views[0:2] == "1 " else views
            return html.unescape(fmt.format(title, channel, playing, views))
        # NOTE(review): an unrecognized type falls through, returning None.
    else:
        # Bare channel name: look for a live stream first.
        data = http.get_json("https://api.twitch.tv/kraken/streams?channel=" + channel)
        if data["streams"]:
            title = data["streams"][0]["channel"]["status"]
            playing = data["streams"][0]["game"]
            v = data["streams"][0]["viewers"]
            viewers = "\x033\x02Online now!\x02\x0f " + str(v) + " viewer" + ("s" if v != 1 else "")
            return html.unescape(fmt.format(title, channel, playing, viewers))
        else:
            # Offline: fall back to the channel's static metadata.
            try:
                data = http.get_json("https://api.twitch.tv/kraken/channels/" + channel)
            except Exception:
                return "Unable to get channel data. Maybe channel is on justin.tv instead of twitch.tv?"
            title = data["status"]
            playing = data["game"]
            viewers = "\x034\x02Offline\x02\x0f"
            return html.unescape(fmt.format(title, channel, playing, viewers))
Ejemplo n.º 23
0
def pvp(text, bot):
    """finds a toon and drops their pvp info"""
    api_key = bot.config.get("api_keys", {}).get("wow", None)
    if not api_key:
        return "No api key found."

    # Expect "<name> <realm> [location]"; location defaults to "us".
    try:
        # BUG FIX: was "text.count(' ') is 2" -- identity comparison on an
        # int is unreliable; use ==.
        if text.count(' ') == 2:
            name, realm, location = text.split(" ")
        else:
            name, realm = text.split(" ")
            location = "us"
    except ValueError:
        return "You need to specify a realm."

    try:
        name = urllib.parse.quote(name)
        location = location.lower()
        if location == 'us':
            data = http.get_json("https://us.api.battle.net/wow/character/{}/{}?fields=pvp&locale=en_US&apikey={}".format(realm, name, api_key))
        elif location == 'eu':
            data = http.get_json("https://eu.api.battle.net/wow/character/{}/{}?fields=pvp&locale=en_US&apikey={}".format(realm, name, api_key))
        else:
            # BUG FIX: an unknown location previously left `data` unbound,
            # which surfaced later as a confusing NameError message.
            return "I didn't understand that location. Use US, or EU."
    except Exception as e:
        return "Something went wrong. Error {}".format(e)

    try:
        brackets = data["pvp"]["brackets"]
        twos = brackets["ARENA_BRACKET_2v2"]
        threes = brackets["ARENA_BRACKET_3v3"]
        skirm = brackets["ARENA_BRACKET_2v2_SKIRMISH"]
        rbg = brackets["ARENA_BRACKET_RBG"]
        # Fives bracket is gone from the API... for now.
    except Exception as e:
        return "Error: {}".format(e)

    return ("2v2s: {} \x02\x033 {} \x03\x02-\x02\x034 {} \x03\x02 | "
            "3v3s: {} \x02\x033 {} \x03\x02-\x02\x034 {} \x03\x02 | "
            "RBGs: {} \x02\x033 {} \x03\x02-\x02\x034 {} \x03\x02 | "
            "Skrims: {} \x02\x033 {} \x03\x02-\x02\x034 {} \x03\x02").format(
        twos["rating"], twos["seasonWon"], twos["seasonLost"],
        threes["rating"], threes["seasonWon"], threes["seasonLost"],
        rbg["rating"], rbg["seasonWon"], rbg["seasonLost"],
        skirm["rating"], skirm["seasonWon"], skirm["seasonLost"])
Ejemplo n.º 24
0
def github_search(text):
    """git <search query> -- Search github for a specific repo"""
    try:
        # BUG FIX: the URL previously hard-coded "q=foo&sort=stars&order=desc"
        # in the query string, conflicting with the real query parameter.
        data = http.get_json("https://api.github.com/search/repositories",
                             q=text.strip(), sort="stars", order="desc")
    except Exception as e:
        return "Could not find repo: {}".format(e)
    try:
        reponame = data["items"][0]["full_name"]
        repourl = data["items"][0]["html_url"]
    except IndexError:
        return "Could not find repo"
    return u"\x02{}\x02 - \x02{}\x02".format(reponame, repourl)
Ejemplo n.º 25
0
def fact(reply):
    """- Gets a random fact about numbers or dates."""
    # Pick one of the configured fact categories at random.
    kind = random.choice(types)
    try:
        data = http.get_json(
            'http://numbersapi.com/random/{}?json'.format(kind))
    except Exception:
        reply("There was an error contacting the numbersapi.com API.")
        raise

    return data['text']
Ejemplo n.º 26
0
def load_key():
    """Refresh the shared Yandex language tables from the getLangs endpoint."""
    api_key = bot.config.get_api_key("yandex_translate")
    if not api_key:
        # No key configured -- leave the caches untouched.
        return

    data = http.get_json(api_url + "getLangs",
                         params={'key': api_key, 'ui': 'en'})

    # Rebuild the module-level caches in place so existing references stay valid.
    lang_dict.clear()
    lang_dir.clear()
    # API maps code -> name; we want name -> code for lookups.
    lang_dict.update({name: code for code, name in data['langs'].items()})
    lang_dir.extend(data['dirs'])
Ejemplo n.º 27
0
def domainr(text):
    """<domain> - uses domain.nr's API to search for a domain, and similar domains
    :type text: str
    """
    url = 'http://domai.nr/api/json/search?q=' + text
    try:
        data = http.get_json(url)
    except (http.URLError, http.HTTPError):
        return "Unable to get data for some reason. Try again later."

    # An empty query field signals an API-side error payload.
    if data['query'] == "":
        return "An error occurred: {status} - {message}".format(**data['error'])

    formatted = ", ".join(format_domain(entry) for entry in data["results"])
    return "Domains: {}".format(formatted)
Ejemplo n.º 28
0
def youtube(text):
    """youtube <query> -- Returns the first YouTube search result for <query>."""
    result = http.get_json(search_api_url, q=text)

    # API-level failure (quota, malformed request, ...).
    if 'error' in result:
        return 'error performing search'

    data = result['data']
    if data['totalItems'] == 0:
        return 'no results found'

    # Describe the top hit and append its canonical URL.
    video_id = data['items'][0]['id']
    return get_video_description(video_id) + " - " + video_url % video_id
Ejemplo n.º 29
0
def api_request(method, **params):
    """Call the LastFM API *method* with *params*.

    Returns a ``(data, error_message)`` tuple. On transport failure
    ``data`` is None; on an API-reported error both are set; on success
    ``error_message`` is None.
    """
    api_key = bot.config.get_api_key("lastfm")
    params.update({"method": method, "api_key": api_key})

    try:
        data = http.get_json(api_url, params=params)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed by the error path.
        return None, "LastFM API error, please try again in a few minutes."

    if 'error' in data:
        return data, "Error: {}.".format(data["message"])

    return data, None
Ejemplo n.º 30
0
def reddit_post_url(match, message):
    """Link handler: announce details for a reddit post URL match."""
    post_id = match.group(1)

    try:
        data = http.get_json(post_url.format(post_id), timeout=2)
        post = data[0]["data"]["children"][0]["data"]
        message(format_output(post))
    except Exception:
        # API lookup failed; fall back to scraping the page title instead.
        full_url = match.group(0)
        print(full_url)
        raw_title = http.get_title(full_url)
        title = u' '.join(re.sub(u'\r|\n', u' ', raw_title).split()).strip('| ')
        short = web.try_shorten(full_url)
        message(f"{short} - {title}")
Ejemplo n.º 31
0
def domainr(text):
    """<domain> - Uses domain.nr's API to search for a domain, and similar domains."""
    api_key = bot.config.get_api_key("rapidapi")
    if not api_key:
        return "This command requires an API key from rapidapi.com."

    data = http.get_json('https://domainr.p.rapidapi.com/v2/search',
                         params={'client_id': api_key, 'domain': text})

    # An empty query field signals an API-side error payload.
    if data['query'] == "":
        return "An error occurred: {status} - {message}".format(**data['error'])

    return "Domains: {}".format(
        ", ".join(format_domain(entry) for entry in data["results"]))
Ejemplo n.º 32
0
def custom_get(query, key, is_image=None, num=1):
    """Run a Google Custom Search query and return the raw JSON response.

    ``key`` is a dict holding the 'cx' engine id and 'access' API key;
    set ``is_image`` truthy to request an image search instead of web.
    """
    params = dict(
        q=query,
        cx=key['cx'],
        key=key['access'],
        num=num,
        # Trim the response to the fields we actually consume.
        fields="items(title,link,snippet)",
        safe="off",
    )

    if is_image:
        params["searchType"] = "image"

    return http.get_json(base_url, params=params)
Ejemplo n.º 33
0
def spartist(inp):
    """spartist <artist> -- Search Spotify for <artist>"""
    try:
        data = http.get_json("http://ws.spotify.com/search/1/artist.json", q=inp.strip())
    except Exception as e:
        return "Could not get artist information: {}".format(e)

    try:
        # href looks like "spotify:artist:<id>"; locals renamed so the
        # ``type`` and ``id`` builtins are no longer shadowed.
        item_type, item_id = data["artists"][0]["href"].split(":")[1:]
    except IndexError:
        return "Could not find artist."
    url = sptfy(gateway.format(item_type, item_id))

    return "\x02{}\x02 - {}".format(data["artists"][0]["name"], url)
Ejemplo n.º 34
0
def domainr(text):
    """<domain> - uses domain.nr's API to search for a domain, and similar domains
    :type text: str
    """
    try:
        response = http.get_json('http://domai.nr/api/json/search?q=' + text)
    except (http.URLError, http.HTTPError):
        return "Unable to get data for some reason. Try again later."

    if response['query'] == "":
        # API returned a structured error instead of results.
        err = response['error']
        return "An error occurred: {status} - {message}".format(**err)

    names = [format_domain(entry) for entry in response["results"]]
    return "Domains: " + ", ".join(names)
Ejemplo n.º 35
0
def reddit(text):
    """<subreddit> [n] - gets a random post from <subreddit>, or gets the [n]th post in the subreddit"""
    id_num = None

    if text:
        # clean and split the input
        parts = text.lower().strip().split()
        # both branches previously built this identical URL; do it once
        url = base_url.format(parts[0].strip())

        # find the requested post number (if any)
        if len(parts) > 1:
            try:
                id_num = int(parts[1]) - 1
            except ValueError:
                return "Invalid post number."
    else:
        # no subreddit given: use the front page
        url = "http://reddit.com/.json"

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)
    data = data["data"]["children"]

    # get the requested/random post
    if id_num is not None:
        try:
            item = data[id_num]["data"]
        except IndexError:
            length = len(data)
            return "Invalid post number. Number must be between 1 and {}.".format(length)
    else:
        item = random.choice(data)["data"]

    item["title"] = formatting.truncate_str(item["title"], 50)
    item["link"] = short_url.format(item["id"])

    # human-readable age of the post
    raw_time = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timesince.timesince(raw_time)

    # NSFW flag appended to the output line
    item["warning"] = " \x02NSFW\x02" if item["over_18"] else ""

    return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
           " {timesince} ago - {ups} upvotes, {downs} downvotes -" \
           " {link}{warning}".format(**item)
Ejemplo n.º 36
0
def lyrics(text, bot, reply):
	"""lyrics <song> -- Search genius for a specific song"""
	access_token = bot.config.get("api_keys", {}).get("genius_access_token", None)
	if not access_token:
		reply("An API key is needed to use this application.")
		return  # bug fix: previously fell through and queried with no token
	try:
		data = http.get_json((base_url.format(access_token)), q=text.strip())
	except Exception as e:
		reply("Could not find song: {}".format(e))
		return  # bug fix: 'data' would be unbound below
	try:
		hit = data["response"]["hits"][0]["result"]
	except IndexError:
		reply("Could not find song")
		return  # bug fix: previously went on to index the empty hit list
	reply("{} by {} - {}".format(hit["title"], hit["primary_artist"]["name"], hit["url"]))
Ejemplo n.º 37
0
async def geoip(text, reply, loop):
    """<IP address> - Gets the location of an IP address."""
    api_key = bot.config.get_api_key("ipapi")
    if not api_key:
        return "This command requires an API key from ipapi.com."

    # quote() the address so it is safe to embed in the URL path
    url = "http://api.ipapi.com/" + http.quote(text.encode('utf8'), safe='')

    try:
        data = http.get_json(url, access_key=api_key)
    except Exception:
        # narrowed from a bare except: so SystemExit/KeyboardInterrupt
        # and asyncio cancellation are not silently swallowed
        return f"I couldn't find {text}"

    return fformat(data).replace('in United', 'in the United')
Ejemplo n.º 38
0
def wouldyourather_first():
    """Asks a would you rather question"""
    # Retry the flaky API up to 4 times (same budget as the old manual
    # attempts counter) before giving up.
    for _ in range(4):
        try:
            data = http.get_json('http://rrrather.com/botapi')
        except Exception:
            # narrowed from a bare except: which also caught KeyboardInterrupt
            continue
        return "{}: {} \x02OR\x0F {}?".format(
            data['title'], data['choicea'], data['choiceb'])
    return "There was an error contacting the rrrather.com API."
Ejemplo n.º 39
0
def wiki(text, message):
    """<phrase> - Gets first sentence of Wikipedia article on <phrase>."""
    try:
        params = {'action': 'query', 'list': 'search',
                  'format': 'json', 'srsearch': http.quote(text)}
        search = http.get_json(search_api, params=params)
    except Exception:
        # narrowed from a bare except: clause
        return 'Error accessing Wikipedia API, please try again in a few minutes.'

    if len(search['query']['search']) == 0:
        # fixed garbled "textut" typo in the user-facing message
        return 'Your query returned no results, please check your input and try again.'

    try:
        # fetch a plain-text intro extract for the top search hit
        params = {'format': 'json', 'action': 'query', 'prop': 'extracts',
                  'exintro': True, 'explaintext': True, 'exchars': 425,
                  'redirects': 1, 'titles': search['query']['search'][0]['title']}
        data = http.get_json(search_api, params=params)
    except Exception:
        return 'Error accessing Wikipedia API, please try again in a few minutes.'

    # take the first (only) page in the result set
    data = data['query']['pages'][list(data['query']['pages'].keys())[0]]
    # trim the truncation ellipsis and cut at the last complete sentence
    data['extract'] = data['extract'].strip('...').rsplit('.', 1)[0] + '.'
    message(u'{} - {}'.format(web.try_shorten(page_url + data['title']), data['extract']))
Ejemplo n.º 40
0
def fact():
    """Gets a random fact about numbers or dates."""
    fact_type = random.choice(types)
    # Retry the flaky API up to 4 times (same budget as the old manual
    # attempts counter) before giving up.
    for _ in range(4):
        try:
            json = http.get_json('http://numbersapi.com/random/{}?json'.format(fact_type))
        except Exception:
            # narrowed from a bare except: which also caught KeyboardInterrupt
            continue
        return json['text']
    return "There was an error contacting the numbersapi.com API."
Ejemplo n.º 41
0
def geocode(text):
    """Geocode *text* with the Google API and return the first result."""
    api_key = bot.config.get_api_key("google").get('access')
    if not api_key:
        raise Exception(
            "This command requires a Google Developers Console API key.")

    params = {'key': api_key, 'address': text}

    # An optional country-code bias narrows ambiguous place names.
    bias = bot.config.get('region_bias_cc')
    if bias:
        params['region'] = bias

    data = http.get_json(geo_url, params=params)
    return data['results'][0]
Ejemplo n.º 42
0
def get_youtube_info(video_id, api_key=None):
    """Return a formatted description of a YouTube video, or just its short URL."""
    result = http.get_json(video_url, params={
        "id": video_id,
        "key": api_key,
        "part": "snippet,contentDetails,statistics",
    })

    items = result.get('items')
    # Fall back to a bare shortened link when the API errors or finds nothing.
    if result.get('error') or not items or len(items) < 1:
        return web.try_shorten(short_url + video_id)

    video = items[0]
    # ISO-8601 duration like "PT4M13S" -> "4m13s"
    playtime = video['contentDetails']['duration'].strip('PT').lower()
    views = int(video['statistics']['viewCount'])
    return output_format.format(url=web.try_shorten(short_url + video_id),
                                time=playtime, views=views, **video['snippet'])
Ejemplo n.º 43
0
def vimeo_url(match):
    """<url> -- returns information on the Vimeo video at <url>"""
    video_id = match.group(1)
    info = http.get_json('https://vimeo.com/api/v2/video/{}.json'.format(video_id))

    if not info:
        # no data: stay silent (implicit None)
        return

    video = info[0]
    video["duration"] = timeformat.format_time(video["duration"])
    # thousands-separated counters for display
    video["stats_number_of_likes"] = format(video["stats_number_of_likes"], ",d")
    video["stats_number_of_plays"] = format(video["stats_number_of_plays"], ",d")
    return (
        "[h1]Vimeo:[/h1] {title} [div] {user_name} [div] {duration} [div] "
        "{upload_date} [div] "
        "[h1]Likes:[/h1] {stats_number_of_likes} [div] "
        "[h1]Plays:[/h1] {stats_number_of_plays}".format(**video))
Ejemplo n.º 44
0
def gif(text, message):
    """<query> - Returns first giphy search result."""
    api_key = bot.config.get_api_key("giphy")
    if not api_key:
        return "This command requires an API key from giphy.com."

    url = 'http://api.giphy.com/v1/gifs/search'
    try:
        response = http.get_json(url, q=text, limit=5, api_key=api_key)
    except http.HTTPError as e:
        return e.msg

    try:
        # choice() raises IndexError on an empty result list
        message(choice(response['data'])['bitly_gif_url'])
    except (IndexError, KeyError):
        # narrowed from a bare except: to the failures an empty or
        # unexpected payload can actually produce
        message('No results found.')
Ejemplo n.º 45
0
def vimeo_url(match):
    """vimeo <url> -- returns information on the Vimeo video at <url>"""
    info = http.get_json('http://vimeo.com/api/v2/video/%s.json'
                         % match.group(1))

    if not info:
        # no data: stay silent (implicit None)
        return

    video = info[0]
    video["duration"] = timeformat.format_time(video["duration"])
    # thousands-separated counters for display
    video["stats_number_of_likes"] = format(video["stats_number_of_likes"], ",d")
    video["stats_number_of_plays"] = format(video["stats_number_of_plays"], ",d")
    return ("\x02%(title)s\x02 - length \x02%(duration)s\x02 - "
            "\x02%(stats_number_of_likes)s\x02 likes - "
            "\x02%(stats_number_of_plays)s\x02 plays - "
            "\x02%(user_name)s\x02 on \x02%(upload_date)s\x02"
            % video)
Ejemplo n.º 46
0
def vimeo_url(match):
    """vimeo <url> - returns information on the Vimeo video at <url>"""
    data = http.get_json(api_url.format(id=match.group(1)))
    if not data:
        return

    video = data[0]
    video["duration"] = timeformat.format_time(video["duration"])
    # some videos report no like counter at all
    video.setdefault("stats_number_of_likes", 0)

    return ("\x02{title}\x02 - length \x02{duration}\x02 - "
            "\x02{stats_number_of_likes:,d}\x02 likes - "
            "\x02{stats_number_of_plays:,d}\x02 plays - "
            "\x02{user_name}\x02 on \x02{upload_date}\x02".format_map(video))
Ejemplo n.º 47
0
def soundcloud(url, api_key):
    """Return a formatted summary line for the SoundCloud track at *url*."""
    query = urlencode({'url': url, 'client_id': api_key})
    data = http.get_json(api_url + '/resolve.json?' + query)

    desc = ""
    if data['description']:
        desc = ": {} ".format(formatting.truncate_str(data['description'], 50))

    genre = ""
    if data['genre']:
        genre = "- Genre: \x02{}\x02 ".format(data['genre'])

    link = web.try_shorten(data['permalink_url'])

    return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre, data['playback_count'], data['download_count'],
        data['comment_count'], link)
Ejemplo n.º 48
0
def imdb_url(match):
    """Link handler: look up the matched IMDb title id on OMDb and summarize it."""
    # the id is the last non-empty path segment of the matched URL
    segments = match.group(4).split('/')
    imdb_id = segments[-1] or segments[-2]
    content = http.get_json("http://www.omdbapi.com/", i=imdb_id)

    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'
    if content['Response'] != 'True':
        return 'Unknown error.'

    content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
    content['Plot'] = formatting.truncate_str(content['Plot'], 50)
    out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
    # only append fields OMDb actually populated
    if content['Runtime'] != 'N/A':
        out += ' \x02%(Runtime)s\x02.'
    if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
        out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \
               ' votes.'
    return out % content
Ejemplo n.º 49
0
def newegg(text):
    """newegg <item name> - searches newegg.com for <item name>"""

    # build and POST the JSON search request body
    payload = json.dumps({"Keyword": text, "Sort": "FEATURED"})
    response = http.get_json(
        'http://www.ows.newegg.com/Search.egg/Advanced',
        post_data=payload.encode('utf-8')
    )

    # format the top-ranked result, if any
    items = response["ProductListItems"]
    if not items:
        return "No results found."
    return format_item(items[0])
Ejemplo n.º 50
0
def spotify_url(match):
    """Link handler: describe the Spotify track/artist/album in a matched URL."""
    # renamed local from ``type`` so the builtin is no longer shadowed
    kind = match.group(2)
    spotify_id = match.group(3)
    url = spuri.format(kind, spotify_id)
    # no error catching here, if the API is down fail silently
    data = http.get_json("http://ws.spotify.com/lookup/1/.json", uri=url)
    link = sptfy(gateway.format(kind, spotify_id))

    if kind == "track":
        name = data["track"]["name"]
        artist = data["track"]["artists"][0]["name"]
        album = data["track"]["album"]["name"]
        return "Spotify Track: \x02{}\x02 by \x02{}\x02 from the album \x02{}\x02 - {}".format(
            name, artist, album, link)
    elif kind == "artist":
        return "Spotify Artist: \x02{}\x02 - {}".format(data["artist"]["name"], link)
    elif kind == "album":
        return "Spotify Album: \x02{}\x02 - \x02{}\x02 - {}".format(
            data["album"]["artist"], data["album"]["name"], link)
Ejemplo n.º 51
0
def mcwiki(text):
    """mcwiki <phrase> - gets the first paragraph of the Minecraft Wiki article on <phrase>"""

    try:
        j = http.get_json(api_url, search=text)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')

    try:
        page = http.get_html(url)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching wiki page: {}".format(e)

    # return the first non-empty paragraph, stripped of footnote markers
    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            # raw string: "\[" in a plain literal is an invalid escape sequence
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = formatting.truncate_str(summary, 200)
            return "{} :: {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
Ejemplo n.º 52
0
def newegg_url(match):
    """Link handler: fetch and format the Newegg product from a matched URL."""
    product = http.get_json(API_PRODUCT.format(match.group(1)))
    # suppress the URL in the output since the trigger was the URL itself
    return format_item(product, show_url=False)
Ejemplo n.º 53
0
def weather(text, reply, db, nick, bot, notice):
    """weather <location> [dontsave] -- Gets weather data
    for <location> from Wunderground.

    With no <location>, reuses the caller's last saved location from the
    per-nick ``weather`` table. Unless "dontsave" is appended, the looked-up
    location is saved back to that table for next time.
    """

    api_key = bot.config.get("api_keys", {}).get("wunderground")

    if not api_key:
        return "Error: No wunderground API details."

    # initialise weather DB
    db.execute("create table if not exists weather(nick primary key, loc)")

    # if there is no input, try getting the users last location from the DB
    if not text:
        location = db.execute("select loc from weather where nick=lower(:nick)",
                              {"nick": nick}).fetchone()
        print(location)
        if not location:
            # no location saved in the database, send the user help text
            notice(weather.__doc__)
            return
        loc = location[0]

        # no need to save a location, we already have it
        dontsave = True
    else:
        # see if the input ends with "dontsave"
        dontsave = text.endswith(" dontsave")

        # remove "dontsave" from the input string after checking for it
        if dontsave:
            loc = text[:-9].strip().lower()
        else:
            loc = text

    location = http.quote_plus(loc)

    # combined geolookup + forecast + current conditions in one call
    request_url = base_url.format(api_key, "geolookup/forecast/conditions", location)
    response = http.get_json(request_url)

    if 'location' not in response:
        # ambiguous query: Wunderground returned candidate matches instead;
        # retry with the zmw id of the closest one
        try:
            location_id = response['response']['results'][0]['zmw']
        except KeyError:
            return "Could not get weather for that location."

        # get the weather again, using the closest match
        request_url = base_url.format(api_key, "geolookup/forecast/conditions", "zmw:" + location_id)
        response = http.get_json(request_url)

    # US locations have a state; elsewhere just city + country
    if response['location']['state']:
        place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(response['location']['city'],
                                                                  response['location']['state'],
                                                                  response['location']['country'])
    else:
        place_name = "\x02{}\x02 (\x02{}\x02)".format(response['location']['city'],
                                                      response['location']['country'])

    forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0]
    forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][1]

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place": place_name,
        "conditions": response['current_observation']['weather'],
        "temp_f": response['current_observation']['temp_f'],
        "temp_c": response['current_observation']['temp_c'],
        "humidity": response['current_observation']['relative_humidity'],
        "wind_kph": response['current_observation']['wind_kph'],
        "wind_mph": response['current_observation']['wind_mph'],
        "wind_direction": response['current_observation']['wind_dir'],
        "today_conditions": forecast_today['conditions'],
        "today_high_f": forecast_today['high']['fahrenheit'],
        "today_high_c": forecast_today['high']['celsius'],
        "today_low_f": forecast_today['low']['fahrenheit'],
        "today_low_c": forecast_today['low']['celsius'],
        "tomorrow_conditions": forecast_tomorrow['conditions'],
        "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c": forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c": forecast_tomorrow['low']['celsius'],
        # apiref tag credits this bot's Wunderground referral account
        "url": web.shorten(response["current_observation"]['forecast_url'] + "?apiref=e535207ff4757b18")
    }

    reply("{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
          "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
          "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
          "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
          "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(**weather_data))

    # remember this user's location unless they opted out
    if location and not dontsave:
        db.execute("insert or replace into weather(nick, loc) values (:nick, :loc)",
                   {"nick": nick.lower(), "loc": loc})
        db.commit()
Ejemplo n.º 54
0
def hulu_url(match):
    """Link handler: summarize the Hulu video at a matched URL."""
    oembed_url = "http://www.hulu.com/api/oembed.json?url=http://www.hulu.com" + match.group(3)
    data = http.get_json(oembed_url)
    # oEmbed title looks like "Episode Title (Show Name)"
    show = data["title"].split("(")[-1].split(")")[0]
    episode = data["title"].split(" (")[0]
    return "{}: {} - {}".format(show, episode, timeformat.timeformat(int(data["duration"])))
Ejemplo n.º 55
0
def api_get(kind, query):
    """Use the RESTful Google Search API"""
    # ``kind`` selects the endpoint (e.g. "web", "images"); the query is
    # sent as the ``q`` parameter with SafeSearch fixed at "moderate".
    url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
          'v=1.0&safe=moderate'
    return http.get_json(url % kind, q=query)