Ejemplo n.º 1
0
def rottentomatoes(inp, bot=None):
    '.rt <title> -- gets ratings for <title> from Rotten Tomatoes'

    # No API key configured -> stay silent (same as original implicit None).
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return None

    query = inp.strip()
    search = http.get_json(movie_search_url % (http.quote_plus(query), api_key))

    # No matches: say nothing.
    if search['total'] <= 0:
        return None

    top = search['movies'][0]
    ratings = top['ratings']
    critics_score = ratings['critics_score']

    # -1 is the API's "no critics rating" sentinel; say nothing in that case.
    if critics_score == -1:
        return None

    reviews = http.get_json(movie_reviews_url % (top['id'], api_key))
    total_reviews = reviews['total']

    # Derive fresh/rotten counts from the percentage score.
    fresh = critics_score * total_reviews / 100
    rotten = total_reviews - fresh

    return response % (top['title'], critics_score, fresh, rotten,
                       ratings['audience_score'], top['links']['alternate'])
Ejemplo n.º 2
0
def define(text):
    """define <word> -- Fetches definition of <word>.
    :type text: str
    """

    base = 'http://ninjawords.com/'
    page = http.get_html(base + http.quote_plus(text))

    # Articles, definitions and examples in page order.
    nodes = page.xpath('//dd[@class="article"] | '
                       '//div[@class="definition"] |'
                       '//div[@class="example"]')
    if not nodes:
        return 'No results for ' + text + ' :('

    # Try with examples first; drop them, then hard-truncate, to fit a line.
    out = format_output(page, nodes, True)
    if len(out) > 450:
        out = format_output(page, nodes, False)

    if len(out) > 450:
        out = out[:out.rfind(' ', 0, 450)]
        out = re.sub(r'[^A-Za-z]+\.?$', '', out) + ' ...'

    return out
Ejemplo n.º 3
0
def rottentomatoes(inp, bot=None):
    '.rt <title> -- gets ratings for <title> from Rotten Tomatoes'

    # require a configured Rotten Tomatoes API key; stay silent otherwise
    api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
    if not api_key:
        return None

    title = inp.strip()

    # movie_search_url is a module-level format string: (quoted title, key)
    results = http.get_json(movie_search_url % (http.quote_plus(title), api_key))
    if results['total'] > 0:
        movie = results['movies'][0]
        title = movie['title']
        id = movie['id']  # NOTE: shadows the `id` builtin inside this function
        critics_score = movie['ratings']['critics_score']
        audience_score = movie['ratings']['audience_score']
        url = movie['links']['alternate']

        # -1 means the API has no critics rating for this movie
        if critics_score != -1:
            reviews = http.get_json(movie_reviews_url%(id, api_key))
            review_count = reviews['total']

            # derive fresh/rotten review counts from the percentage score
            fresh = critics_score * review_count / 100
            rotten = review_count - fresh

            # implicit None is returned when there is no match or no score
            return response % (title, critics_score, fresh, rotten, audience_score, url)
Ejemplo n.º 4
0
def weather(inp, nick=None, reply=None, db=None, notice=None):
    "weather | <location> [save] | <@ user> -- Gets weather data for <location>."
    # by default the looked-up location is saved for the calling nick;
    # an '@user' lookup or a ' dontsave' suffix suppresses that
    save = True

    if '@' in inp:
        # "weather @nick": use another user's stored location, never save
        save = False
        nick = inp.split('@')[1].strip()
        loc = database.get(db,'users','location','nick',nick)
        if not loc: return "No location stored for {}.".format(nick.encode('ascii', 'ignore'))
    else:
        loc = database.get(db,'users','location','nick',nick)
        if not inp:
            if not loc:
                # no argument and nothing stored: show usage
                notice(weather.__doc__)
                return
        else:
            # if not loc: save = True
            if " dontsave" in inp:
                inp = inp.replace(' dontsave','')
                save = False
            loc = inp.replace(' ','_') #.split()[0]

    location = http.quote_plus(loc)
    # location = location.replace(',','').replace(' ','-')

    # now, to get the actual weather
    try:
        data = get_weather('%s' % location)
    except KeyError:
        return "Could not get weather for that location."

    # remember the location for this nick (unless suppressed above)
    if location and save: database.set(db,'users','location',location,'nick',nick)

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    # NOTE(review): keys mirror the get_weather() payload; forecast[0] is
    # today, forecast[1] presumably tomorrow -- confirm against get_weather
    weather_data = {
        "place": data['location']['city'],
        "conditions": data['item']['condition']['text'],
        "temp_f": data['item']['condition']['temp'],
        "temp_c": data['item']['condition']['temp_c'],
        "humidity": data['atmosphere']['humidity'],
        "wind_kph": data['wind']['speed_kph'],
        "wind_mph": data['wind']['speed'],
        "wind_text": data['wind']['text'],
        "forecast": data['item']['forecast'][0]['text'],
        "high_f": data['item']['forecast'][0]['high'],
        "high_c": data['item']['forecast'][0]['high_c'],
        "low_f": data['item']['forecast'][0]['low'],
        "low_c": data['item']['forecast'][0]['low_c'],
        "_forecast": data['item']['forecast'][1]['text'],
        "_high_f": data['item']['forecast'][1]['high'],
        "_high_c": data['item']['forecast'][1]['high_c'],
        "_low_f": data['item']['forecast'][1]['low'],
        "_low_c": data['item']['forecast'][1]['low_c']
    }

    # \x02 is the IRC bold control code
    reply("\x02{place}\x02 - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, Humidity: {humidity}%, " \
            "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_text}, \x02Today:\x02 {forecast}, " \
            "High: {high_f}F/{high_c}C, Low: {low_f}F/{low_c}C. " \
            "\x02Tomorrow:\x02 {_forecast}, High: {_high_f}F" \
            "/{_high_c}C, Low: {_low_f}F/{_low_c}C.".format(**weather_data))
Ejemplo n.º 5
0
def weather(inp, bot=None, reply=None, nick=None, db=None):
  '.weather <location> [dontsave] -- gets weather data for location'

  request = 'http://api.worldweatheronline.com/free/v1/weather.ashx?key={0}&q={1}&num_of_days=1&format=json'

  loc = inp

  # init db
  db.execute("create table if not exists weather(nick primary key, loc)")

  dontsave = inp.endswith(' dontsave')
  if dontsave:
    loc = inp[:-9]

  loc = inp.lower().strip()

  # no address given
  if not loc:
    loc = db.execute("select loc from weather where nick=lower(?)",
                     (nick,)).fetchone()
    if not loc:
      return weather.__doc__
    loc = loc[0]

  try:
    j = http.get_json(request.format(bot.config['api_keys']['wwo'], http.quote_plus(loc)))
  except IOError, e:
    print e
    return 'um the api broke or something'
Ejemplo n.º 6
0
def define(inp):
    """.define/.dict <word> - fetches definition of <word>."""
    url = 'http://ninjawords.com/'

    try:
        h = http.get_html(url + http.quote_plus(inp))
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
    except:
        return "API error; please try again in a few minutes."

    # article headings, definition bodies and usage examples, in page order
    definition = h.xpath('//dd[@class="article"] | '
                         '//div[@class="definition"] |'
                         '//div[@class="example"]')

    if not definition:
        return 'No results for ' + inp

    def format_output(show_examples):
        # start from the headword (or the site's spelling correction)
        result = '%s: ' % h.xpath('//dt[@class="title-word"]/a/text()')[0]

        correction = h.xpath('//span[@class="correct-word"]/text()')
        if correction:
            result = 'definition for "%s": ' % correction[0]

        # group the flat node list: each 'article' opens a new group,
        # 'example' text extends the previous entry, others are senses
        sections = []
        for section in definition:
            if section.attrib['class'] == 'article':
                sections += [[section.text_content() + ': ']]
            elif section.attrib['class'] == 'example':
                if show_examples:
                    sections[-1][-1] += ' ' + section.text_content()
            else:
                sections[-1] += [section.text_content()]

        for article in sections:
            result += article[0]
            if len(article) > 2:
                # several senses: number them "1. ... 2. ..."
                result += ' '.join('%d. %s' % (n + 1, section)
                                   for n, section in enumerate(article[1:]))
            else:
                result += article[1] + ' '

        synonyms = h.xpath('//dd[@class="synonyms"]')
        if synonyms:
            result += synonyms[0].text_content()

        # collapse whitespace and strip degree signs
        result = re.sub(r'\s+', ' ', result)
        result = re.sub('\xb0', '', result)
        return result

    # retry without examples, then hard-truncate, to fit one IRC line
    result = format_output(True)
    if len(result) > 450:
        result = format_output(False)

    if len(result) > 450:
        result = result[:result.rfind(' ', 0, 450)]
        result = re.sub(r'[^A-Za-z]+\.?$', '', result) + ' ...'

    return result
Ejemplo n.º 7
0
def lmgtfy(inp, bot=None):
    "lmgtfy [phrase] - Posts a google link for the specified phrase"

    # Build the "let me google that for you" link for the phrase.
    url = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp)

    # Prefer an is.gd short link; fall back to the full URL on any
    # shortener/HTTP failure.
    try:
        return web.isgd(url)
    except (web.ShortenError, http.HTTPError):
        return url
Ejemplo n.º 8
0
def define(inp):
    """define <word> -- Fetches definition of <word>."""

    url = 'http://ninjawords.com/'
    h = http.get_html(url + http.quote_plus(inp))

    # Article headings, definition bodies and usage examples, in page order.
    definition = h.xpath('//dd[@class="article"] | '
                         '//div[@class="definition"] |'
                         '//div[@class="example"]')
    if not definition:
        return 'No results for ' + inp + ' :('

    def format_output(show_examples):
        # Start from the headword, or the site's spelling correction.
        words = h.xpath('//dt[@class="title-word"]/a/text()')
        result = '{}: '.format(words[0])

        correction = h.xpath('//span[@class="correct-word"]/text()')
        if correction:
            result = 'Definition for "{}": '.format(correction[0])

        # Group the flat node list: each 'article' opens a new group,
        # 'example' text extends the previous entry, others are senses.
        sections = []
        for node in definition:
            cls = node.attrib['class']
            if cls == 'article':
                sections.append([node.text_content() + ': '])
            elif cls == 'example':
                if show_examples:
                    sections[-1][-1] += ' ' + node.text_content()
            else:
                sections[-1].append(node.text_content())

        for article in sections:
            result += article[0]
            if len(article) > 2:
                # Several senses: number them "1. ... 2. ...".
                numbered = (u'{}. {}'.format(i + 1, body)
                            for i, body in enumerate(article[1:]))
                result += u' '.join(numbered)
            else:
                result += article[1] + ' '

        synonyms = h.xpath('//dd[@class="synonyms"]')
        if synonyms:
            result += synonyms[0].text_content()

        # Collapse whitespace and strip degree signs.
        result = re.sub(r'\s+', ' ', result)
        result = re.sub('\xb0', '', result)
        return result

    # Retry without examples, then hard-truncate, to fit one IRC line.
    result = format_output(True)
    if len(result) > 450:
        result = format_output(False)

    if len(result) > 450:
        result = result[:result.rfind(' ', 0, 450)]
        result = re.sub(r'[^A-Za-z]+\.?$', '', result) + ' ...'

    return result
Ejemplo n.º 9
0
def lmgtfy(inp, bot=None):
    "lmgtfy [phrase] - Posts a google link for the specified phrase"

    # build the "let me google that for you" link for the phrase
    link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp)

    try:
        # shorten via is.gd; fall back to the full link on shortener failure
        return web.isgd(link)
    except (web.ShortenError, http.HTTPError):
        return link
Ejemplo n.º 10
0
def lmgtfy(inp):
    """lmgtfy [phrase] - Posts a google link for the specified phrase"""

    # Build the lmgtfy query link for the phrase.
    target = "http://lmgtfy.com/?q={}".format(http.quote_plus(inp))

    try:
        # Hand back an is.gd short link when the shortener cooperates.
        return web.isgd(target)
    except (web.ShortenError, http.HTTPError):
        # Otherwise post the full link.
        return target
Ejemplo n.º 11
0
def define(inp):
    """define <word> -- Fetches definition of <word>."""

    url = 'http://ninjawords.com/'

    h = http.get_html(url + http.quote_plus(inp))

    # article headings, definition bodies and usage examples, in page order
    definition = h.xpath('//dd[@class="article"] | '
                         '//div[@class="definition"] |'
                         '//div[@class="example"]')

    if not definition:
        return 'No results for ' + inp + ' :('

    def format_output(show_examples):
        # start from the headword (or the site's spelling correction)
        result = '{}: '.format(h.xpath('//dt[@class="title-word"]/a/text()')[0])

        correction = h.xpath('//span[@class="correct-word"]/text()')
        if correction:
            result = 'Definition for "{}": '.format(correction[0])

        # group the flat node list: each 'article' opens a new group,
        # 'example' text extends the previous entry, others are senses
        sections = []
        for section in definition:
            if section.attrib['class'] == 'article':
                sections += [[section.text_content() + ': ']]
            elif section.attrib['class'] == 'example':
                if show_examples:
                    sections[-1][-1] += ' ' + section.text_content()
            else:
                sections[-1] += [section.text_content()]

        for article in sections:
            result += article[0]
            if len(article) > 2:
                # several senses: number them "1. ... 2. ..."
                result += u' '.join(u'{}. {}'.format(n + 1, section)
                                    for n, section in enumerate(article[1:]))
            else:
                result += article[1] + ' '

        synonyms = h.xpath('//dd[@class="synonyms"]')
        if synonyms:
            result += synonyms[0].text_content()

        # collapse whitespace and strip degree signs
        result = re.sub(r'\s+', ' ', result)
        result = re.sub('\xb0', '', result)
        return result

    # retry without examples, then hard-truncate, to fit one IRC line
    result = format_output(True)
    if len(result) > 450:
        result = format_output(False)

    if len(result) > 450:
        result = result[:result.rfind(' ', 0, 450)]
        result = re.sub(r'[^A-Za-z]+\.?$', '', result) + ' ...'

    return result
Ejemplo n.º 12
0
def time_cmd(inp, reply=None, bot=None):
  # hard-coded alias for a common misspelling
  if inp == 'lodon':
    inp = 'london'
  # easter egg
  if inp == 'my life':
    return 'no:no'
  # World Weather Online timezone endpoint: {0}=api key, {1}=quoted query
  request = 'http://api.worldweatheronline.com/free/v1/tz.ashx?key={0}&q={1}&format=json'
  j = http.get_json(request.format(bot.config['api_keys']['wwo'], http.quote_plus(inp)))
  j = j['data']['time_zone'][0]
  # keep only the whole-hour part of the offset
  # NOTE(review): this drops fractional offsets (e.g. +5.5) -- confirm intended
  utc = j['utcOffset'].split('.')[0]
  utc = '+' + utc if float(utc) >= 0 else utc
  return '{0} (UTC {1})'.format(j['localtime'], utc)
Ejemplo n.º 13
0
def wolframalpha(inp, bot=None):
    "wa <query> -- Computes <query> using Wolfram Alpha."

    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return "error: missing api key"

    url = 'http://api.wolframalpha.com/v2/query?format=plaintext'

    result = http.get_xml(url, input=inp, appid=api_key)

    # get the URL for a user to view this query in a browser
    query_url = "http://www.wolframalpha.com/input/?i=" + \
                http.quote_plus(inp.encode('utf-8'))
    try:
        short_url = web.isgd(query_url)
    except (web.ShortenError, http.HTTPError):
        # fall back to the long URL if shortening fails
        short_url = query_url

    # collect "title: value, value" strings from each primary result pod
    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        # skip the pod that merely echoes the query
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            # the plaintext contains literal backslash-n sequences
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    ret = ' - '.join(pod_texts)

    if not pod_texts:
        return 'No results.'

    # unescape backslash-escaped characters
    ret = re.sub(r'\\(.)', r'\1', ret)

    def unicode_sub(match):
        # decode "\:xxxx" unicode escapes (unichr: Python 2 only)
        return unichr(int(match.group(1), 16))

    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)

    # truncate to a single IRC-friendly line
    ret = text.truncate_str(ret, 250)

    if not ret:
        return 'No results.'

    return "%s - %s" % (ret, short_url)
Ejemplo n.º 14
0
def define(inp):
    "define <word> -- Fetches definition of <word>."

    url = "http://ninjawords.com/"

    h = http.get_html(url + http.quote_plus(inp))

    # article headings and definition bodies in page order
    # (this variant does not select the example divs)
    definition = h.xpath('//dd[@class="article"] | ' '//div[@class="definition"]')

    if not definition:
        return "No results for " + inp + " :("

    def format_output(show_examples):
        # start from the headword (or the site's spelling correction)
        result = "%s: " % h.xpath('//dt[@class="title-word"]/a/text()')[0]

        correction = h.xpath('//span[@class="correct-word"]/text()')
        if correction:
            result = 'Definition for "%s": ' % correction[0]

        # group the flat node list: each 'article' opens a new group,
        # 'example' text extends the previous entry, others are senses
        sections = []
        for section in definition:
            if section.attrib["class"] == "article":
                sections += [[section.text_content() + ": "]]
            elif section.attrib["class"] == "example":
                if show_examples:
                    sections[-1][-1] += " " + section.text_content()
            else:
                sections[-1] += [section.text_content()]

        for article in sections:
            result += article[0]
            if len(article) > 2:
                # several senses: number them "1. ... 2. ..."
                result += " ".join("%d. %s" % (n + 1, section) for n, section in enumerate(article[1:]))
            else:
                result += article[1] + " "

        synonyms = h.xpath('//dd[@class="synonyms"]')
        if synonyms:
            result += synonyms[0].text_content()

        # collapse whitespace and strip degree signs
        result = re.sub(r"\s+", " ", result)
        result = re.sub("\xb0", "", result)
        return result

    # retry without examples, then hard-truncate, to fit one IRC line
    result = format_output(True)
    if len(result) > 450:
        result = format_output(False)

    if len(result) > 450:
        result = result[: result.rfind(" ", 0, 450)]
        result = re.sub(r"[^A-Za-z]+\.?$", "", result) + " ..."

    return result
Ejemplo n.º 15
0
def weather(inp, nick=None, reply=None, db=None, notice=None):
    "weather | <location> [save] | <@ user> -- Gets weather data for <location>."
    # FIX: the try-block below mixed tabs and spaces for indentation, which is
    # a TabError under Python 3 and fragile under Python 2; normalised to
    # 4-space indentation throughout. Behaviour is unchanged.
    save = False

    if '@' in inp:
        # "weather @nick": use another user's stored location, never save
        save = False
        nick = inp.split('@')[1].strip()
        loc = database.get(db, 'users', 'location', 'nick', nick)
        if not loc:
            return "No location stored for {}.".format(nick.encode('ascii', 'ignore'))
    else:
        loc = database.get(db, 'users', 'location', 'nick', nick)
        if not inp:
            if not loc:
                # no argument and nothing stored: show usage
                notice(weather.__doc__)
                return
        else:
            # a trailing ' save' flag stores the location for this nick
            if " save" in inp:
                inp = inp.replace(' save', '')
                save = True
            loc = inp.replace(' ', '_')

    location = http.quote_plus(loc)

    # query the YQL weather.forecast table for this location
    try:
        q = {
            'q': 'select title, units.temperature, item.forecast from weather.forecast where woeid in (select woeid from geo.places where text="' + location + '") limit 1',
            'format': 'json',
            'env': 'store://datatables.org/alltableswithkeys'
        }

        result = query(q)
        data = json.loads(result)
        weather = data["query"]["results"]["channel"]
        # mean of the forecast high and low, then converted to Celsius
        average_F = float((int(weather['item']['forecast']['high']) + int(weather['item']['forecast']['low'])) / 2)
        average_C = round(float((average_F - 32) * (5.0 / 9.0)), 2)
    except KeyError:
        return "Could not get weather for that location."

    # remember the location for this nick when requested
    if location and save:
        database.set(db, 'users', 'location', location, 'nick', nick)

    # gather output fields for easy formatting
    weather_data = {
        'title': weather["title"].replace("Yahoo! Weather -", ""),
        'current': weather['item']['forecast']['text'],
        'temp_f': average_F,
        'temp_c': average_C
    }

    # \x02 is the IRC bold control code
    reply("\x02{title}\x02 - \x02Current:\x02 {current}, {temp_f}F/{temp_c}C".format(**weather_data))
Ejemplo n.º 16
0
def xkcd_search(term):
    # Search ohnorobot's xkcd transcript index for the term.
    encoded = http.quote_plus(term)
    soup = http.get_soup("http://www.ohnorobot.com/index.pl?s={}&Search=Search&"
                         "comic=56&e=0&n=0&b=0&m=0&d=0&t=0".format(encoded))

    # The first <li> is the top-ranked hit.
    hit = soup.find('li')
    if not hit:
        return "No results found!"

    # The tinylink div holds the comic URL; its last path component
    # (minus the trailing slash) is the comic id.
    url = hit.find('div', {'class': 'tinylink'}).text
    xkcd_id = url[:-1].split("/")[-1]
    print(xkcd_id)
    return xkcd_info(xkcd_id, url=True)
Ejemplo n.º 17
0
def xkcd_search(term):
    # search ohnorobot's xkcd transcript index for the term
    search_term = http.quote_plus(term)
    soup = http.get_soup(
        "http://www.ohnorobot.com/index.pl?s={}&Search=Search&"
        "comic=56&e=0&n=0&b=0&m=0&d=0&t=0".format(search_term))
    # the first <li> is the top-ranked hit
    result = soup.find('li')
    if result:
        # tinylink div holds the comic URL; its last path component
        # (minus the trailing slash) is the comic id
        url = result.find('div', {'class': 'tinylink'}).text
        xkcd_id = url[:-1].split("/")[-1]
        print xkcd_id
        return xkcd_info(xkcd_id, url=True)
    else:
        return "No results found!"
Ejemplo n.º 18
0
def get_wiki_article(inp):
    """ search darksouls.wikidot.com """

    # Scrape the search page instead of using the wikidot api, because it sucks.
    page = http.get_html(search_url % http.quote_plus(inp))

    try:
        # First matching node; its first attribute value is the article URL.
        hit = page.xpath(result_xpath)[0]
        page_url = hit.values()[0]
    except IndexError:
        return "No Results"

    return "%s -- %s" % (hit.text_content(), page_url)
Ejemplo n.º 19
0
def get_status(name):
    """ takes a name and returns status """
    try:
        response = http.get(NAME_URL.format(http.quote_plus(name)))
    except (http.URLError, http.HTTPError) as e:
        raise McuError("Could not get name status: {}".format(e))

    # Map the API's reply text onto a status keyword, checked in the
    # same order as the original chain; None when nothing matches.
    for marker, status in (("OK", "free"),
                           ("TAKEN", "taken"),
                           ("invalid characters", "invalid")):
        if marker in response:
            return status
Ejemplo n.º 20
0
def get_wiki_article(inp):
    # using scraping instead of the wikidot api because it sucks
    # it doesn't have a text search, though the site might just be using the api method to select tags anyway
    
    # search_url / result_xpath are module-level constants
    results = http.get_html(search_url % http.quote_plus(inp))
    
    try:
        # first matching node; its first attribute value is the article URL
        result = results.xpath(result_xpath)[0]
        page_url = result.values()[0]
    except IndexError:
        return "No Results"

    title = result.text_content()
    return "%s -- %s" % (title, page_url)
Ejemplo n.º 21
0
def get_status(name):
    """ takes a name and returns status """
    try:
        name_encoded = http.quote_plus(name)
        response = http.get(NAME_URL.format(name_encoded))
    except (http.URLError, http.HTTPError) as e:
        raise McuError("Could not get name status: {}".format(e))

    # map the API's reply text onto a status keyword;
    # implicit None when nothing matches
    if "OK" in response:
        return "free"
    elif "TAKEN" in response:
        return "taken"
    elif "invalid characters" in response:
        return "invalid"
Ejemplo n.º 22
0
def get_wiki_article(inp):
    # using scraping instead of the wikidot api because it sucks
    # it doesn't have a text search, though the site might just be using the api method to select tags anyway

    # search_url / result_xpath are module-level constants
    results = http.get_html(search_url % http.quote_plus(inp))

    try:
        # first matching node; its first attribute value is the article URL
        result = results.xpath(result_xpath)[0]
        page_url = result.values()[0]
    except IndexError:
        return "No Results"

    title = result.text_content()
    return "%s -- %s" % (title, page_url)
Ejemplo n.º 23
0
def validate(inp):
    "validate <url> -- Runs url through the w3c markup validator."

    # BUG FIX: previously only 'http://' was checked, so an https:// URL was
    # mangled into 'http://https://...'. Leave both schemes alone.
    if not inp.startswith(('http://', 'https://')):
        inp = 'http://' + inp

    # the validator reports its verdict via x-w3c-validator-* response headers
    url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
    info = dict(http.open(url).info())

    status = info['x-w3c-validator-status'].lower()
    if status in ("valid", "invalid"):
        errorcount = info['x-w3c-validator-errors']
        warningcount = info['x-w3c-validator-warnings']
        # implicit None (no output) when the validator aborted
        return "%s was found to be %s with %s errors and %s warnings." \
                " see: %s" % (inp, status, errorcount, warningcount, url)
Ejemplo n.º 24
0
def validate(inp):
    ".validate <url> -- runs url through w3c markup validator"

    # NOTE(review): this also prefixes https:// URLs with 'http://',
    # producing 'http://https://...' -- likely a bug
    if not inp.startswith('http://'):
        inp = 'http://' + inp

    # the validator reports its verdict via x-w3c-validator-* response headers
    url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
    info = dict(http.open(url).info())

    status = info['x-w3c-validator-status'].lower()
    if status in ("valid", "invalid"):
        errorcount = info['x-w3c-validator-errors']
        warningcount = info['x-w3c-validator-warnings']
        # implicit None (no output) when the validator aborted
        return "%s was found to be %s with %s errors and %s warnings." \
            " see: %s" % (inp, status, errorcount, warningcount, url)
Ejemplo n.º 25
0
def validate(inp):
    """validate <url> -- Runs url through the w3c markup validator."""

    # BUG FIX: previously only 'http://' was checked, so an https:// URL was
    # mangled into 'http://https://...'. Leave both schemes alone.
    if not inp.startswith(('http://', 'https://')):
        inp = 'http://' + inp

    # the validator reports its verdict via x-w3c-validator-* response headers
    url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
    info = dict(http.open(url).info())

    status = info['x-w3c-validator-status'].lower()
    if status in ("valid", "invalid"):
        error_count = info['x-w3c-validator-errors']
        warning_count = info['x-w3c-validator-warnings']
        # implicit None (no output) when the validator aborted
        return "{} was found to be {} with {} errors and {} warnings." \
               " see: {}".format(inp, status, error_count, warning_count, url)
Ejemplo n.º 26
0
def plugin_search(term):
    """ searches for a plugin with the bukget API and returns the slug """
    term = term.lower().strip()
    encoded = http.quote_plus(term)

    try:
        results = http.get_json(search_url.format(encoded))
    except (http.HTTPError, http.URLError) as e:
        raise BukgetError(500, "Error Fetching Search Page: {}".format(e))

    if not results:
        raise BukgetError(404, "No Results Found")

    # Prefer an exact slug match; otherwise fall back to the first hit.
    return next((hit["slug"] for hit in results if hit["slug"] == term),
                results[0]["slug"])
Ejemplo n.º 27
0
def plugin_search(term):
    """ searches for a plugin with the bukget API and returns the slug """
    term = term.lower().strip()

    search_term = http.quote_plus(term)

    try:
        results = http.get_json(search_url.format(search_term))
    except (http.HTTPError, http.URLError) as e:
        raise BukgetError(500, "Error Fetching Search Page: {}".format(e))
    
    if not results:
        raise BukgetError(404, "No Results Found")

    # prefer an exact slug match over the ranked results
    for result in results:
        if result["slug"] == term:
            return result["slug"]

    # no exact match: fall back to the first hit
    return results[0]["slug"]
Ejemplo n.º 28
0
def validate(inp):
    ".validate <url> -- runs url through w3c markup validator"

    # NOTE(review): this also prefixes https:// URLs with 'http://',
    # producing 'http://https://...' -- likely a bug
    if not inp.startswith("http://"):
        inp = "http://" + inp

    # the validator reports its verdict via x-w3c-validator-* response headers
    url = "http://validator.w3.org/check?uri=" + http.quote_plus(inp)
    info = dict(http.open(url).info())

    status = info["x-w3c-validator-status"].lower()
    if status in ("valid", "invalid"):
        errorcount = info["x-w3c-validator-errors"]
        warningcount = info["x-w3c-validator-warnings"]
        # implicit None (no output) when the validator aborted
        return "%s was found to be %s with %s errors and %s warnings." " see: %s" % (
            inp,
            status,
            errorcount,
            warningcount,
            url,
        )
Ejemplo n.º 29
0
def get_beer(inp):
    """ search beeradvocate.com """

    search_url = "http://beeradvocate.com/search?q=%s"
    base_url = "http://beeradvocate.com"

    listing = http.get_html(search_url % http.quote_plus(inp))

    # First result item on the search page, or bail out.
    try:
        top = listing.xpath("//td[@id='mainContent']/div[2]/ul/li[1]")[0]
    except IndexError:
        return "No Results"

    # First anchor links to the beer page; anchor texts give name/brewery.
    links = top.xpath('a')
    page_url = base_url + links[0].get('href')
    scores = http.get_html(page_url).cssselect('.BAscore_big')
    beer_info = [a.text_content() for a in links]

    return "%s by %s :: Community Score: %s :: Bros Score: %s :: %s" % (
        beer_info[0], beer_info[1], scores[0].text_content(),
        scores[1].text_content(), page_url)
Ejemplo n.º 30
0
def get_beer(inp):
    """ search beeradvocate.com """

    search_url = "http://beeradvocate.com/search?q=%s"
    base_url = "http://beeradvocate.com"

    results = http.get_html(search_url % http.quote_plus(inp))
    
    # first result item on the search page, or bail out
    try:
        result = results.xpath("//td[@id='mainContent']/div[2]/ul/li[1]")[0]
    except IndexError:
        return "No Results"

    # first anchor links to the beer page; anchor texts give name/brewery
    page_url = base_url + result.xpath('a')[0].get('href')
    scores = http.get_html(page_url).cssselect('.BAscore_big')
    beer_info = [x.text_content() for x in result.xpath('a')]

    return "%s by %s :: Community Score: %s :: Bros Score: %s :: %s" % (beer_info[0], 
                                                                        beer_info[1],
                                                                        scores[0].text_content(), 
                                                                        scores[1].text_content(), page_url)
Ejemplo n.º 31
0
def steamcalc(inp, db=None):
    "steamcalc <user> -- Check the value of <user>s steam account."
    db_init(db)

    # steam ids are a single token
    if " " in inp:
        return "Invalid Steam ID"

    uid = inp.strip().lower()
    url = "http://steamcalculator.com/id/{}".format(http.quote_plus(uid))

    # get the web page
    try:
        page = http.get_html(url)
    except Exception as e:
        return "Could not get Steam game listing: {}".format(e)

    # extract the info we need
    # (count_re / value_re are module-level regexes over the detail box)
    try:
        count_text = page.xpath("//div[@id='rightdetail']/text()")[0]
        count = int(count_re.findall(count_text)[0])

        value_text = page.xpath("//div[@id='rightdetail']/h1/text()")[0]
        value = float(value_re.findall(value_text)[0])
    except IndexError:
        return "Could not get Steam game listing."

    # save the info in the DB for steam rankings
    db.execute(
        "insert or replace into steam_rankings(id, value, count)"
        "values(?,?,?)", (uid, value, count))
    db.commit()

    # shorten the URL
    try:
        short_url = web.isgd(url)
    except web.ShortenError:
        short_url = url

    # \x02 is the IRC bold control code
    return u"\x02Games:\x02 {}, \x02Total Value:\x02 ${:.2f} USD - {}".format(
        count, value, short_url)
Ejemplo n.º 32
0
def steamcalc(inp, db=None):
    "steamcalc <user> -- Check the value of <user>s steam account."
    db_init(db)

    # Steam ids are a single token.
    if " " in inp:
        return "Invalid Steam ID"

    steam_id = inp.strip().lower()
    page_url = "http://steamcalculator.com/id/{}".format(http.quote_plus(steam_id))

    # Fetch the listing page.
    try:
        page = http.get_html(page_url)
    except Exception as e:
        return "Could not get Steam game listing: {}".format(e)

    # Pull the game count and dollar value out of the detail box
    # (count_re / value_re are module-level regexes).
    try:
        detail_text = page.xpath("//div[@id='rightdetail']/text()")[0]
        count = int(count_re.findall(detail_text)[0])

        heading_text = page.xpath("//div[@id='rightdetail']/h1/text()")[0]
        value = float(value_re.findall(heading_text)[0])
    except IndexError:
        return "Could not get Steam game listing."

    # Record the result for the steam rankings table.
    db.execute("insert or replace into steam_rankings(id, value, count)"
               "values(?,?,?)", (steam_id, value, count))
    db.commit()

    # Shorten the URL, falling back to the long form.
    try:
        short_url = web.isgd(page_url)
    except web.ShortenError:
        short_url = page_url

    return u"\x02Games:\x02 {}, \x02Total Value:\x02 ${:.2f} USD - {}".format(count, value, short_url)
Ejemplo n.º 33
0
def steamcalc(inp):
    "uses steamcalculator.com to find out how much a steam account is worth. doesnt do DLC."
    # steam ids are a single token
    if " " in inp:
        return "Invalid Steam ID"

    url = "http://www.steamcalculator.com/id/{}".format(http.quote_plus(inp))

    try:
        page = http.get_html(url)
    except Exception as e:
        return "Could not get Steam game listing: {}".format(e)

    # count_re is a module-level regex over the detail box text
    try:
        count = page.xpath("//div[@id='rightdetail']/text()")[0]
        number = count_re.findall(count)[0]

        value = page.xpath("//div[@id='rightdetail']/h1/text()")[0]
    # NOTE(review): bare except hides real errors; probably meant IndexError
    except:
        return("Steam account not found under: "+inp)

    #short_url = web.isgd(url)

    return "Found {} games with a value of {}!".format(number, value)
Ejemplo n.º 34
0
def wiki(inp, say=None):
    """wiki <phrase> - Gets first sentence of Wikipedia article on <phrase>."""
    # first, a full-text search to resolve the phrase to a page id
    try:
        search_api = u'http://en.wikipedia.org/w/api.php'
        params = {
            'action': 'query',
            'list': 'search',
            'format': 'json',
            'srsearch': http.quote_plus(inp)
        }
        search = http.get_json(search_api, query_params=params)
    except:
        return 'Error accessing Wikipedia API, please try again in a few minutes.'

    if len(search['query']['search']) == 0:
        return 'Your query returned no results, please check your input and try again.'

    # then fetch a plaintext intro extract (max 425 chars) plus the page URL
    try:
        params = {
            'format': 'json',
            'action': 'query',
            'prop': 'info|extracts',
            'exintro': True,
            'explaintext': True,
            'exchars': 425,
            'pageids': search['query']['search'][0]['pageid'],
            'inprop': 'url',
            'redirects': 1
        }
        data = http.get_json(search_api, query_params=params)
    except:
        return 'Error accessing Wikipedia API, please try again in a few minutes.'

    # NOTE: .keys()[0] is Python 2 only (dict_keys is not indexable on Py3);
    # 'pages' is keyed by page id, and only one page was requested
    data = data['query']['pages'][data['query']['pages'].keys()[0]]
    # trim the "..." the API appends and cut back to the last full sentence
    data['extract'] = data['extract'].strip('...').rsplit('.', 1)[0] + '.'
    say(u'{} - {}'.format(web.try_googl(data['fullurl']), data['extract']))
Ejemplo n.º 35
0
def weather(inp, nick=None, reply=None, db=None, notice=None):
    "weather | <location> [save] | <@ user> -- Gets weather data for <location>."
    save = False

    if '@' in inp:
        # Look up another user's saved location; never save in this mode.
        save = False
        nick = inp.split('@')[1].strip()
        loc = database.get(db, 'users', 'location', 'nick', nick)
        if not loc:
            return "No location stored for {}.".format(
                nick.encode('ascii', 'ignore'))
    else:
        loc = database.get(db, 'users', 'location', 'nick', nick)
        if not inp:
            if not loc:
                # Nothing given and nothing stored: show the usage string.
                notice(weather.__doc__)
                return
        else:
            if " save" in inp:
                inp = inp.replace(' save', '')
                save = True
            loc = inp.replace(' ', '_')  #.split()[0]

    location = http.quote_plus(loc)

    # now, to get the actual weather, via a YQL query against the
    # weather.forecast table keyed by the location's WOEID
    try:

        q = {
            'q':
            'select title, units.temperature, item.forecast from weather.forecast where woeid in (select woeid from geo.places where text="'
            + location + '") limit 1',
            'format':
            'json',
            'env':
            'store://datatables.org/alltableswithkeys'
        }

        result = query(q)
        data = json.loads(result)
        # Renamed from `weather` so the local no longer shadows this function
        # (the `weather.__doc__` lookup above depends on the global name).
        channel = data["query"]["results"]["channel"]
        # Mean of today's high and low. Divide by 2.0: under Python 2 the
        # original `/ 2` truncated the average before it was floated.
        average_F = (int(channel['item']['forecast']['high']) +
                     int(channel['item']['forecast']['low'])) / 2.0
        average_C = round((average_F - 32) * (5.0 / 9.0), 2)
    except KeyError:
        return "Could not get weather for that location."

    if location and save:
        database.set(db, 'users', 'location', location, 'nick', nick)

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        'title': channel["title"].replace("Yahoo! Weather -", ""),
        'current': channel['item']['forecast']['text'],
        'temp_f': average_F,
        'temp_c': average_C
    }

    reply("\x02{title}\x02 - \x02Current:\x02 {current}, {temp_f}F/{temp_c}C".
          format(**weather_data))
Ejemplo n.º 36
0
def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
    """weather <location> [dontsave] -- Gets weather data
    for <location> from Wunderground."""

    api_key = bot.config.get("api_keys", {}).get("wunderground")

    if not api_key:
        return "Error: No wunderground API details."

    # initialise weather DB
    db.execute("create table if not exists weather(nick primary key, loc)")

    # if there is no input, try getting the users last location from the DB
    if not inp:
        location = db.execute("select loc from weather where nick=lower(:nick)",
                              {"nick": nick}).fetchone()
        if not location:
            # no location saved in the database, send the user help text
            notice(weather.__doc__)
            return
        loc = location[0]

        # no need to save a location, we already have it
        dontsave = True
    else:
        # see if the input ends with "dontsave"
        dontsave = inp.endswith(" dontsave")

        # remove "dontsave" from the input string after checking for it
        if dontsave:
            loc = inp[:-9].strip().lower()
        else:
            loc = inp

    location = http.quote_plus(loc)

    request_url = base_url.format(api_key, "geolookup/forecast/conditions", location)
    response = http.get_json(request_url)

    # Ambiguous location: Wunderground returns candidate matches instead of
    # a 'location' key; retry with the zmw id of the closest match.
    if 'location' not in response:
        try:
            location_id = response['response']['results'][0]['zmw']
        except KeyError:
            return "Could not get weather for that location."

        # get the weather again, using the closest match
        request_url = base_url.format(api_key, "geolookup/forecast/conditions", "zmw:" + location_id)
        response = http.get_json(request_url)

    if response['location']['state']:
        place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(response['location']['city'],
                                                                  response['location']['state'],
                                                                  response['location']['country'])
    else:
        place_name = "\x02{}\x02 (\x02{}\x02)".format(response['location']['city'],
                                                      response['location']['country'])

    forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0]
    forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][1]

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place": place_name,
        "conditions": response['current_observation']['weather'],
        "temp_f": response['current_observation']['temp_f'],
        "temp_c": response['current_observation']['temp_c'],
        "humidity": response['current_observation']['relative_humidity'],
        "wind_kph": response['current_observation']['wind_kph'],
        "wind_mph": response['current_observation']['wind_mph'],
        "wind_direction": response['current_observation']['wind_dir'],
        "today_conditions": forecast_today['conditions'],
        "today_high_f": forecast_today['high']['fahrenheit'],
        "today_high_c": forecast_today['high']['celsius'],
        "today_low_f": forecast_today['low']['fahrenheit'],
        "today_low_c": forecast_today['low']['celsius'],
        "tomorrow_conditions": forecast_tomorrow['conditions'],
        "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c": forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c": forecast_tomorrow['low']['celsius'],
        "url": web.isgd(response["current_observation"]['forecast_url'] + "?apiref=e535207ff4757b18")
    }

    reply("{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
          "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
          "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
          "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
          "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(**weather_data))

    if location and not dontsave:
        db.execute("insert or replace into weather(nick, loc) values (:nick, :loc)",
                   {"nick": nick.lower(), "loc": loc})
        db.commit()
Ejemplo n.º 37
0
    import time
    start = time.clock()

    scantimeout = '3.0'
    podtimeout = '4.0'
    formattimeout = '8.0'
    async = 'True'

    waeo = WolframAlphaEngine(api_key, server)

    waeo.ScanTimeout = scantimeout
    waeo.PodTimeout = podtimeout
    waeo.FormatTimeout = formattimeout
    waeo.Async = async

    query = waeo.CreateQuery(http.quote_plus(inp))
    result = waeo.PerformQuery(query)
    waeqr = WolframAlphaQueryResult(result)

    results = []
    pods = waeqr.Pods()
    for pod in pods:
        waep = Pod(pod)
        subpods = waep.Subpods()
        for subpod in subpods:
            waesp = Subpod(subpod)
            plaintext = waesp.Plaintext()
            results.append(plaintext)

    try:
        waquery = re.sub(' (?:\||) +', ' ', ' '.join(results[0][0].splitlines())).strip()
Ejemplo n.º 38
0
    try:
        plat, title = args.split(' ', 1)
        if plat = 'xbone':
            plat = 'xone'
        if plat not in all_platforms:
            # raise the ValueError so that the except block catches it
            # in this case, or in the case of the .split above raising the
            # ValueError, we want the same thing to happen
            raise ValueError
    except ValueError:
        plat = 'all'
        title = args

    cat = 'game' if plat in game_platforms else plat

    title_safe = http.quote_plus(title)

    url = 'http://www.metacritic.com/search/%s/%s/results' % (cat, title_safe)

    try:
        doc = http.get_html(url)
    except HTTPError:
        return 'error fetching results'

    ''' result format:
    -- game result, with score
    -- subsequent results are the same structure, without first_result class
    <li class="result first_result">
        <div class="result_type">
            <strong>Game</strong>
            <span class="platform">WII</span>
Ejemplo n.º 39
0
def weather(inp, chan='', nick='', reply=None, db=None, api_key=None):
    ".weather <location> [dontsave] | @<nick> -- gets weather data from Wunderground "\
            "http://wunderground.com/weather/api"

    # No API key configured: return None so the command is silently disabled.
    if not api_key:
        return None

    # this database is used by other plugins interested in user's locations,
    # like .near in tag.py
    db.execute(
        "create table if not exists location(chan, nick, loc, lat, lon, primary key(chan, nick))"
    )

    # "@nick" form: report another user's saved location; never save.
    if inp[0:1] == '@':
        nick = inp[1:].strip()
        loc = None
        dontsave = True
    else:
        dontsave = inp.endswith(" dontsave")
        # strip off the " dontsave" text if it exists and set it back to `inp` so we don't report it
        # back to the user incorrectly
        if dontsave:
            inp = inp[:-9].strip().lower()
        loc = inp

    if not loc:  # blank line
        # Fall back to the location stored for this (chan, nick) pair.
        loc = db.execute(
            "select loc from location where chan=? and nick=lower(?)",
            (chan, nick)).fetchone()
        if not loc:
            try:
                # grab from old-style weather database
                loc = db.execute("select loc from weather where nick=lower(?)",
                                 (nick, )).fetchone()
            # NOTE(review): assumes `db` exposes OperationalError as an
            # attribute (optional DB-API extension) — confirm the wrapper
            # actually provides it; plain sqlite3 connections do not.
            except db.OperationalError:
                pass  # no such table
            if not loc:
                # Still nothing: reply with the usage string above.
                return weather.__doc__
        loc = loc[0]

    # Split an optional ", state" suffix off a "city, state" location.
    loc, _, state = loc.partition(', ')

    # Check to see if a lat, long pair is being passed. This could be done more
    # completely with regex, and converting from DMS to decimal degrees. This
    # is nice and simple, however.
    try:
        float(loc)
        float(state)

        loc = loc + ',' + state
        state = ''
    except ValueError:
        if state:
            state = http.quote_plus(state)
            state += '/'

        loc = http.quote(loc)

    url = 'http://api.wunderground.com/api/'
    query = '{key}/geolookup/conditions/forecast/q/{state}{loc}.json' \
            .format(key=api_key, state=state, loc=loc)
    url += query

    try:
        parsed_json = http.get_json(url)
    except IOError:
        return 'Could not get data from Wunderground'

    info = {}
    # No observation in the response: list up to six suggested matches.
    if 'current_observation' not in parsed_json:
        resp = 'Could not find weather for {inp}. '.format(inp=inp)

        # In the case of no observation, but results, print some possible
        # location matches
        if 'results' in parsed_json['response']:
            resp += 'Possible matches include: '
            results = parsed_json['response']['results']

            for place in results[:6]:
                resp += '{city}, '.format(**place)

                if place['state']:
                    resp += '{state}, '.format(**place)

                if place['country_name']:
                    resp += '{country_name}; '.format(**place)

            # Drop the trailing two-character separator ("; " or ", ").
            resp = resp[:-2]

        reply(resp)
        return

    # Current conditions plus today's simple forecast.
    obs = parsed_json['current_observation']
    sf = parsed_json['forecast']['simpleforecast']['forecastday'][0]
    info['city'] = obs['display_location']['full']
    info['t_f'] = obs['temp_f']
    info['t_c'] = obs['temp_c']
    info['weather'] = obs['weather']
    info['h_f'] = sf['high']['fahrenheit']
    info['h_c'] = sf['high']['celsius']
    info['l_f'] = sf['low']['fahrenheit']
    info['l_c'] = sf['low']['celsius']
    info['humid'] = obs['relative_humidity']
    info['wind'] = 'Wind: {mph}mph/{kph}kph' \
        .format(mph=obs['wind_mph'], kph=obs['wind_kph'])
    reply('{city}: {weather}, {t_f}F/{t_c}C'
          '(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)'
          ', Humidity: {humid}, {wind}'.format(**info))

    lat = float(obs['display_location']['latitude'])
    lon = float(obs['display_location']['longitude'])

    # Remember the location (with coordinates) unless "dontsave" was given.
    if inp and not dontsave:
        db.execute(
            "insert or replace into location(chan, nick, loc, lat, lon) "
            "values (?, ?, ?, ?,?)", (chan, nick.lower(), inp, lat, lon))
        db.commit()
Ejemplo n.º 40
0
def metacritic(inp):
    """mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title>
    Gets rating for <title> from metacritic on the specified medium."""

    query = inp.strip()

    game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii',
                      'vita', 'wiiu', 'xone', 'ps4')

    all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')

    # The leading word may select a platform/medium; anything else means
    # "search every medium" with the whole input as the title.
    plat, sep, rest = query.partition(' ')
    if sep and plat in all_platforms:
        title = rest
    else:
        plat = 'all'
        title = query

    cat = 'game' if plat in game_platforms else plat

    url = 'http://www.metacritic.com/search/{}/{}/results'.format(
        cat, http.quote_plus(title))

    try:
        doc = http.get_html(url)
    except HTTPError:
        return 'error fetching results'

    if not doc.find_class('query_results'):
        return 'No results found.'

    # Metacritic blanks the search box when it rejects the search term.
    if doc.get_element_by_id('search_term').value == '':
        return 'Invalid search term.'

    result = None

    if plat in game_platforms:
        # Games: pick the first hit whose platform tag matches the request.
        for candidate in doc.find_class('result'):
            candidate_plat = candidate.find_class('platform')[0].text_content().strip()
            if candidate_plat == plat.upper():
                result = candidate
                break
    else:
        # [all] or a non-game medium: take the first result and, where the
        # page provides it, read the real platform/medium back out of it.
        result = doc.find_class('result first_result')[0]

        result_type = result.find_class('result_type')
        if result_type:
            platform_div = result_type[0].find_class('platform')
            if platform_div:
                # the result_type div carries an explicit platform span
                plat = platform_div[0].text_content().strip()
            else:
                # no platform span; the result_type text names the medium
                plat = result_type[0].text_content().strip()

    if not result:
        return 'No results found.'

    # Pull the name, link, release date, and score out of the chosen result.
    product_title = result.find_class('product_title')[0]
    name = product_title.text_content()
    link = 'http://metacritic.com' + product_title.find('a').attrib['href']

    try:
        release = result.find_class('release_date')[0]. \
            find_class('data')[0].text_content()
        # collapse runs of whitespace inside the release date
        release = re.sub(r'\s{2,}', ' ', release)
    except IndexError:
        release = None

    try:
        score = result.find_class('metascore_w')[0].text_content()
    except IndexError:
        score = None

    return '[{}] {} - \x02{}/100\x02, {} - {}'.format(plat.upper(), name, score or 'no score',
                                                      'release: \x02%s\x02' % release if release else 'unreleased',
                                                      link)
Ejemplo n.º 41
0
def weather(inp, nick=None, reply=None, db=None, notice=None):
    "weather | <location> [save] | <@ user> -- Gets weather data for <location>."
    # Locations are saved by default; "dontsave" opts out for this call.
    save = True

    if '@' in inp:
        # "@nick" form: report another user's saved location; never save.
        save = False
        nick = inp.split('@')[1].strip()
        loc = database.get(db, 'users', 'location', 'nick', nick)
        if not loc:
            return "No location stored for {}.".format(
                nick.encode('ascii', 'ignore'))
    else:
        loc = database.get(db, 'users', 'location', 'nick', nick)
        if not inp:
            if not loc:
                # Nothing given and nothing stored: show the usage string.
                notice(weather.__doc__)
                return
        else:
            # if not loc: save = True
            if " dontsave" in inp:
                inp = inp.replace(' dontsave', '')
                save = False
            loc = inp.replace(' ', '_')  #.split()[0]

    location = http.quote_plus(loc)
    # location = location.replace(',','').replace(' ','-')

    # now, to get the actual weather
    # NOTE(review): the shape of `data` below (location/item/wind/atmosphere
    # keys) is whatever get_weather() returns — confirm against that helper.
    try:
        data = get_weather('%s' % location)
    except KeyError:
        return "Could not get weather for that location."

    if location and save:
        database.set(db, 'users', 'location', location, 'nick', nick)

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    # (leading-underscore keys hold tomorrow's forecast values)
    weather_data = {
        "place": data['location']['city'],
        "conditions": data['item']['condition']['text'],
        "temp_f": data['item']['condition']['temp'],
        "temp_c": data['item']['condition']['temp_c'],
        "humidity": data['atmosphere']['humidity'],
        "wind_kph": data['wind']['speed_kph'],
        "wind_mph": data['wind']['speed'],
        "wind_text": data['wind']['text'],
        "forecast": data['item']['forecast'][0]['text'],
        "high_f": data['item']['forecast'][0]['high'],
        "high_c": data['item']['forecast'][0]['high_c'],
        "low_f": data['item']['forecast'][0]['low'],
        "low_c": data['item']['forecast'][0]['low_c'],
        "_forecast": data['item']['forecast'][1]['text'],
        "_high_f": data['item']['forecast'][1]['high'],
        "_high_c": data['item']['forecast'][1]['high_c'],
        "_low_f": data['item']['forecast'][1]['low'],
        "_low_c": data['item']['forecast'][1]['low_c']
    }

    reply("\x02{place}\x02 - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, Humidity: {humidity}%, " \
            "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_text}, \x02Today:\x02 {forecast}, " \
            "High: {high_f}F/{high_c}C, Low: {low_f}F/{low_c}C. " \
            "\x02Tomorrow:\x02 {_forecast}, High: {_high_f}F" \
            "/{_high_c}C, Low: {_low_f}F/{_low_c}C.".format(**weather_data))
Ejemplo n.º 42
0
def metacritic(inp):
    '.mc [all|movie|tv|album|x360|ps3|pc|ds|3ds|wii|psv] <title> -- gets rating for'\
    ' <title> from metacritic on the specified medium'

    # if the results suck, it's metacritic's fault

    args = inp.strip()

    game_platforms = ('x360', 'ps3', 'pc', 'ds', 'wii', '3ds', 'gba', 'psv')
    all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')

    # The first word may select a platform/medium; otherwise search all media.
    try:
        plat, title = args.split(' ', 1)
        if plat not in all_platforms:
            # raise the ValueError so that the except block catches it
            # in this case, or in the case of the .split above raising the
            # ValueError, we want the same thing to happen
            raise ValueError
    except ValueError:
        plat = 'all'
        title = args

    cat = 'game' if plat in game_platforms else plat

    title_safe = http.quote_plus(title)

    url = 'http://www.metacritic.com/search/%s/%s/results' % (cat, title_safe)

    try:
        doc = http.get_html(url)
    except HTTPError:
        return 'error fetching results'
    ''' result format:
    -- game result, with score
    -- subsequent results are the same structure, without first_result class
    <li class="result first_result">
        <div class="result_type">
            <strong>Game</strong>
            <span class="platform">WII</span>
        </div>
        <div class="result_wrap">
            <div class="basic_stats has_score">
                <div class="main_stats">
                    <h3 class="product_title basic_stat">...</h3>
                    <div class="std_score">
                      <div class="score_wrap">
                        <span class="label">Metascore: </span>
                        <span class="data metascore score_favorable">87</span>
                      </div>
                    </div>
                </div>
                <div class="more_stats extended_stats">...</div>
            </div>
        </div>
    </li>

    -- other platforms are the same basic layout
    -- if it doesn't have a score, there is no div.basic_score
    -- the <div class="result_type"> changes content for non-games:
    <div class="result_type"><strong>Movie</strong></div>
    '''

    # get the proper result element we want to pull data from

    result = None

    if not doc.find_class('query_results'):
        return 'no results found'

    # if they specified an invalid search term, the input box will be empty
    if doc.get_element_by_id('search_term').value == '':
        return 'invalid search term'

    if plat not in game_platforms:
        # for [all] results, or non-game platforms, get the first result
        result = doc.find_class('result first_result')[0]

        # find the platform, if it exists
        result_type = result.find_class('result_type')
        if result_type:

            # if the result_type div has a platform div, get that one
            platform_div = result_type[0].find_class('platform')
            if platform_div:
                plat = platform_div[0].text_content().strip()
            else:
                # otherwise, use the result_type text_content
                plat = result_type[0].text_content().strip()

    else:
        # for games, we want to pull the first result with the correct
        # platform
        results = doc.find_class('result')
        for res in results:
            result_plat = res.find_class('platform')[0].text_content().strip()
            if result_plat == plat.upper():
                result = res
                break

    if not result:
        return 'no results found'

    # get the name, release date, and score from the result
    product_title = result.find_class('product_title')[0]
    name = product_title.text_content()
    link = 'http://metacritic.com' + product_title.find('a').attrib['href']

    try:
        release = result.find_class('release_date')[0].\
            find_class('data')[0].text_content()

        # strip extra spaces out of the release date
        release = re.sub(r'\s{2,}', ' ', release)
    except IndexError:
        release = None

    # Metacritic renamed the score element's CSS class from "metascore" to
    # "metascore_w"; the other scraper variants in this file already use the
    # new name. Try it first for consistency, falling back to the legacy
    # class so older markup still parses.
    try:
        score = result.find_class('metascore_w')[0].text_content()
    except IndexError:
        try:
            score = result.find_class('metascore')[0].text_content()
        except IndexError:
            score = None

    return '[%s] %s - %s, %s -- %s' % (plat.upper(), name, score
                                       or 'no score', 'release: %s' % release
                                       if release else 'unreleased', link)
Ejemplo n.º 43
0
def metacritic(inp):
    """.mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title> - Gets rating for <title> from metacritic on the specified medium."""

    # if the results suck, it's metacritic's fault

    args = inp.strip()

    game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii', 'vita', 'wiiu', 'xone', 'ps4')
    all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')

    # The first word may select a platform/medium; anything else means
    # "search every medium" with the whole input as the title.
    try:
        plat, title = args.split(' ', 1)
        if plat not in all_platforms:
            # raise the ValueError so that the except block catches it
            # in this case, or in the case of the .split above raising the
            # ValueError, we want the same thing to happen
            raise ValueError
    except ValueError:
        plat = 'all'
        title = args

    cat = 'game' if plat in game_platforms else plat

    title_safe = http.quote_plus(title)

    url = 'http://www.metacritic.com/search/%s/%s/results' % (cat, title_safe)

    # `doc` supports lxml-style lookups (find_class / get_element_by_id),
    # used throughout the scraping below.
    try:
        doc = http.get_html(url)
    except HTTPError:
        return 'error fetching results'

    ''' result format:
    -- game result, with score
    -- subsequent results are the same structure, without first_result class
    <li class="result first_result">
        <div class="result_type">
            <strong>Game</strong>
            <span class="platform">WII</span>
        </div>
        <div class="result_wrap">
            <div class="basic_stats has_score">
                <div class="main_stats">
                    <h3 class="product_title basic_stat">...</h3>
                    <div class="std_score">
                      <div class="score_wrap">
                        <span class="label">Metascore: </span>
                        <span class="data metascore score_favorable">87</span>
                      </div>
                    </div>
                </div>
                <div class="more_stats extended_stats">...</div>
            </div>
        </div>
    </li>

    -- other platforms are the same basic layout
    -- if it doesn't have a score, there is no div.basic_score
    -- the <div class="result_type"> changes content for non-games:
    <div class="result_type"><strong>Movie</strong></div>
    '''

    # get the proper result element we want to pull data from

    result = None

    if not doc.find_class('query_results'):
        return 'no results found'

    # if they specified an invalid search term, the input box will be empty
    if doc.get_element_by_id('search_term').value == '':
        return 'invalid search term'

    if plat not in game_platforms:
        # for [all] results, or non-game platforms, get the first result
        result = doc.find_class('result first_result')[0]

        # find the platform, if it exists
        result_type = result.find_class('result_type')
        if result_type:

            # if the result_type div has a platform div, get that one
            platform_div = result_type[0].find_class('platform')
            if platform_div:
                plat = platform_div[0].text_content().strip()
            else:
                # otherwise, use the result_type text_content
                plat = result_type[0].text_content().strip()

    else:
        # for games, we want to pull the first result with the correct
        # platform
        results = doc.find_class('result')
        for res in results:
            result_plat = res.find_class('platform')[0].text_content().strip()
            if result_plat == plat.upper():
                result = res
                break

    if not result:
        return 'no results found'

    # get the name, release date, and score from the result
    product_title = result.find_class('product_title')[0]
    name = product_title.text_content()
    link = 'http://metacritic.com' + product_title.find('a').attrib['href']

    try:
        release = result.find_class('release_date')[0].\
            find_class('data')[0].text_content()

        # strip extra spaces out of the release date
        release = re.sub(r'\s{2,}', ' ', release)
    except IndexError:
        release = None

    # "metascore_w" is the element class metacritic currently uses for scores
    try:
        score = result.find_class('metascore_w')[0].text_content()
    except IndexError:
        score = None

    return '[%s] %s - %s, %s -- %s' % (plat.upper(), name,
                                       score or 'no score',
                                       'release: %s' % release if release else 'unreleased',
                                       link)
Ejemplo n.º 44
0
    start = time.clock()

    scantimeout = '3.0'
    podtimeout = '4.0'
    formattimeout = '8.0'
    async = 'True'

    waeo = WolframAlphaEngine(api_key, server)

    waeo.ScanTimeout = scantimeout
    waeo.PodTimeout = podtimeout
    waeo.FormatTimeout = formattimeout
    waeo.Async = async

    query = waeo.CreateQuery(http.quote_plus(inp))
    result = waeo.PerformQuery(query)
    waeqr = WolframAlphaQueryResult(result)

    results = []
    pods = waeqr.Pods()
    for pod in pods:
        waep = Pod(pod)
        subpods = waep.Subpods()
        for subpod in subpods:
            waesp = Subpod(subpod)
            plaintext = waesp.Plaintext()
            results.append(plaintext)
    try:
        waquery = re.sub(' (?:\||) +', ' ',
                         ' '.join(results[0][0].splitlines())).strip().replace(
Ejemplo n.º 45
0
def metacritic(inp):
    ".mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title> -- gets rating for" " <title> from metacritic on the specified medium"

    # if the results suck, it's metacritic's fault

    args = inp.strip()

    game_platforms = (
        "x360",
        "ps3",
        "pc",
        "gba",
        "ds",
        "3ds",
        "wii",
        "vita",
        "wiiu",
        "xone",
        "ps4",
    )
    all_platforms = game_platforms + ("all", "movie", "tv", "album")

    # The first word may select a platform/medium; otherwise search all media.
    try:
        plat, title = args.split(" ", 1)
        if plat not in all_platforms:
            # raise the ValueError so that the except block catches it
            # in this case, or in the case of the .split above raising the
            # ValueError, we want the same thing to happen
            raise ValueError
    except ValueError:
        plat = "all"
        title = args

    cat = "game" if plat in game_platforms else plat

    title_safe = http.quote_plus(title)

    url = "http://www.metacritic.com/search/%s/%s/results" % (cat, title_safe)

    try:
        doc = http.get_html(url)
    except HTTPError:
        return "error fetching results"

    # get the proper result element we want to pull data from

    result = None

    if not doc.find_class("query_results"):
        return "no results found"

    # if they specified an invalid search term, the input box will be empty
    if doc.get_element_by_id("primary_search_box").value == "":
        return "invalid search term"

    if plat not in game_platforms:
        # for [all] results, or non-game platforms, get the first result
        result = doc.find_class("result first_result")[0]

        # find the platform, if it exists
        result_type = result.find_class("result_type")
        if result_type:

            # if the result_type div has a platform div, get that one
            platform_div = result_type[0].find_class("platform")
            if platform_div:
                plat = platform_div[0].text_content().strip()
            else:
                # otherwise, use the result_type text_content
                plat = result_type[0].text_content().strip()

    else:
        # for games, we want to pull the first result with the correct
        # platform
        results = doc.find_class("result")
        for res in results:
            result_plat = res.find_class("platform")[0].text_content().strip()
            if result_plat == plat.upper():
                result = res
                break

    if not result:
        return "no results found"

    # get the name, release date, and score from the result
    product_title_element = result.find_class("product_title")[0]

    review = {
        "platform":
        plat.upper(),
        "title":
        product_title_element.text_content().strip(),
        "link":
        "http://metacritic.com" +
        product_title_element.find("a").attrib["href"],
    }

    try:
        score_element = result.find_class("metascore_w")[0]

        review["score"] = score_element.text_content().strip()

        review["score_color"] = get_score_color(score_element.classes)
    except IndexError:
        review["score"] = "unknown"
        # also set score_color: the format string below requires the key,
        # so the original raised KeyError whenever no score was found
        review["score_color"] = ""

    return "[{platform}] {title} - \x02{score_color}{score}\x0f - {link}".format(
        **review)
Ejemplo n.º 46
0
def metacritic(inp):
    "mc [all|movie|tv|album|x360|ps3|wii|pc|ds|3ds|vita] <title> -- Gets rating for <title> from metacritic on the specified medium."

    # if the results suck, it's metacritic's fault

    args = inp.strip()

    game_platforms = ("x360", "ps3", "pc", "ds", "wii", "3ds", "gba", "psp", "vita")

    all_platforms = game_platforms + ("all", "movie", "tv", "album")

    # Peel an optional leading platform token off the input; a single-word
    # input or an unrecognised token falls through to an "all" search.
    try:
        plat, title = args.split(" ", 1)
        if plat not in all_platforms:
            # raise the ValueError so that the except block catches it
            # in this case, or in the case of the .split above raising the
            # ValueError, we want the same thing to happen
            raise ValueError
    except ValueError:
        plat = "all"
        title = args

    # Metacritic groups every game platform under the "game" search category.
    cat = "game" if plat in game_platforms else plat

    title_safe = http.quote_plus(title)

    url = "http://www.metacritic.com/search/%s/%s/results" % (cat, title_safe)

    try:
        doc = http.get_html(url)
    except HTTPError:
        return "error fetching results"

    """ result format:
    -- game result, with score
    -- subsequent results are the same structure, without first_result class
    <li class="result first_result">
        <div class="result_type">
            <strong>Game</strong>
            <span class="platform">WII</span>
        </div>
        <div class="result_wrap">
            <div class="basic_stats has_score">
                <div class="main_stats">
                    <h3 class="product_title basic_stat">...</h3>
                    <div class="std_score">
                      <div class="score_wrap">
                        <span class="label">Metascore: </span>
                        <span class="data metascore score_favorable">87</span>
                      </div>
                    </div>
                </div>
                <div class="more_stats extended_stats">...</div>
            </div>
        </div>
    </li>

    -- other platforms are the same basic layout
    -- if it doesn't have a score, there is no div.basic_score
    -- the <div class="result_type"> changes content for non-games:
    <div class="result_type"><strong>Movie</strong></div>
    """

    # get the proper result element we want to pull data from

    result = None

    if not doc.find_class("query_results"):
        return "No results found."

    # if they specified an invalid search term, the input box will be empty
    if doc.get_element_by_id("search_term").value == "":
        return "Invalid search term."

    if plat not in game_platforms:
        # for [all] results, or non-game platforms, get the first result
        result = doc.find_class("result first_result")[0]

        # find the platform, if it exists
        result_type = result.find_class("result_type")
        if result_type:

            # if the result_type div has a platform div, get that one
            platform_div = result_type[0].find_class("platform")
            if platform_div:
                plat = platform_div[0].text_content().strip()
            else:
                # otherwise, use the result_type text_content
                plat = result_type[0].text_content().strip()

    else:
        # for games, we want to pull the first result with the correct
        # platform
        results = doc.find_class("result")
        for res in results:
            result_plat = res.find_class("platform")[0].text_content().strip()
            if result_plat == plat.upper():
                result = res
                break

    if not result:
        return "No results found."

    # get the name, release date, and score from the result
    product_title = result.find_class("product_title")[0]
    name = product_title.text_content()
    link = "http://metacritic.com" + product_title.find("a").attrib["href"]

    # Release date is optional in the markup; missing elements raise
    # IndexError from the [0] lookups.
    try:
        release = result.find_class("release_date")[0].find_class("data")[0].text_content()

        # strip extra spaces out of the release date
        release = re.sub(r"\s{2,}", " ", release)
    except IndexError:
        release = None

    # The score span is likewise optional (unscored titles have none).
    try:
        score = result.find_class("metascore")[0].text_content()
    except IndexError:
        score = None

    # \x02 toggles IRC bold around the score and release date.
    return "[%s] %s - \x02%s/100\x02, %s - %s" % (
        plat.upper(),
        name,
        score or "no score",
        "release: \x02%s\x02" % release if release else "unreleased",
        link,
    )
Ejemplo n.º 47
0
def weather(inp, nick='', server='', reply=None, db=None, api_key=None):
    ".weather <location> [dontsave] -- gets weather data from Wunderground "\
            "http://wunderground.com/weather/api"

    # Without an API key we cannot query Wunderground at all.
    if not api_key:
        return None

    loc = inp

    # A trailing " dontsave" suppresses persisting this location for the nick.
    dontsave = loc.endswith(" dontsave")
    if dontsave:
        loc = loc[:-9].strip().lower()

    db.execute("create table if not exists weather(nick primary key, loc)")

    if not loc:  # blank line: fall back to the location saved for this nick
        loc = db.execute("select loc from weather where nick=lower(?)",
                         (nick, )).fetchone()
        if not loc:
            return weather.__doc__
        loc = loc[0]

    loc, _, state = loc.partition(', ')

    # Check to see if a lat, long pair is being passed. This could be done more
    # completely with regex, and converting from DMS to decimal degrees. This
    # is nice and simple, however.
    try:
        float(loc)
        float(state)

        loc = loc + ',' + state
        state = ''
    except ValueError:
        if state:
            state = http.quote_plus(state)
            state += '/'

        loc = http.quote_plus(loc)

    url = 'http://api.wunderground.com/api/'
    query = '{key}/geolookup/conditions/forecast/q/{state}{loc}.json' \
            .format(key=api_key, state=state, loc=loc)
    url += query

    try:
        parsed_json = http.get_json(url)
    except IOError:
        # print() with a single argument works on both Python 2 and 3; the
        # original used the Python 2-only print statement (SyntaxError on 3).
        print('Could not get data from Wunderground')
        return None

    info = {}
    if 'current_observation' not in parsed_json:
        resp = 'Could not find weather for {inp}. '.format(inp=inp)

        # In the case of no observation, but results, print some possible
        # location matches
        if 'results' in parsed_json['response']:
            resp += 'Possible matches include: '
            results = parsed_json['response']['results']

            for place in results[:6]:
                resp += '{city} '.format(**place)

                if place['state']:
                    resp += '{state} '.format(**place)

                if place['country_name']:
                    resp += '{country_name}, '.format(**place)

            # drop the trailing ", " left by the loop above
            resp = resp[:-2]

        reply(resp)
        return

    obs = parsed_json['current_observation']
    sf = parsed_json['forecast']['simpleforecast']['forecastday'][0]
    info['city'] = obs['display_location']['full']
    info['t_f'] = obs['temp_f']
    info['t_c'] = obs['temp_c']
    info['weather'] = obs['weather']
    info['h_f'] = sf['high']['fahrenheit']
    info['h_c'] = sf['high']['celsius']
    info['l_f'] = sf['low']['fahrenheit']
    info['l_c'] = sf['low']['celsius']
    info['humid'] = obs['relative_humidity']
    info['wind'] = 'Wind: {mph}mph/{kph}kph'\
            .format(mph=obs['wind_mph'], kph=obs['wind_kph'])
    reply('{city}: {weather}, {t_f}F/{t_c}C'\
            '(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)' \
            ', Humidity: {humid}, {wind}'.format(**info))

    # Persist the location unless the user opted out with "dontsave".
    if inp and not dontsave:
        db.execute("insert or replace into weather(nick, loc) values (?,?)",
                   (nick.lower(), inp))
        db.commit()
Ejemplo n.º 48
0
def metacritic(inp):
    """mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title>
    Gets rating for <title> from metacritic on the specified medium."""

    query = inp.strip()

    game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii',
                      'vita', 'wiiu', 'xone', 'ps4')

    all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')

    # Peel an optional leading platform token off the query. A one-word
    # query (split fails) or an unrecognised token both mean "search all".
    try:
        plat, title = query.split(' ', 1)
        if plat not in all_platforms:
            raise ValueError
    except ValueError:
        plat, title = 'all', query

    # Every game platform shares metacritic's "game" search category.
    cat = 'game' if plat in game_platforms else plat

    url = 'http://www.metacritic.com/search/{}/{}/results'.format(
        cat, http.quote_plus(title))

    try:
        doc = http.get_html(url)
    except HTTPError:
        return 'error fetching results'

    if not doc.find_class('query_results'):
        return 'No results found.'

    # An empty search box means metacritic rejected the search term.
    if doc.get_element_by_id('search_term').value == '':
        return 'Invalid search term.'

    result = None

    if plat in game_platforms:
        # Games: scan for the first result whose platform matches.
        for candidate in doc.find_class('result'):
            found = candidate.find_class('platform')[0].text_content().strip()
            if found == plat.upper():
                result = candidate
                break
    else:
        # [all] / non-game media: take the first result and read the
        # actual medium back out of it, if present.
        result = doc.find_class('result first_result')[0]

        result_type = result.find_class('result_type')
        if result_type:
            platform_div = result_type[0].find_class('platform')
            source = platform_div[0] if platform_div else result_type[0]
            plat = source.text_content().strip()

    if not result:
        return 'No results found.'

    # Pull name and link from the matched result.
    product_title = result.find_class('product_title')[0]
    name = product_title.text_content()
    link = 'http://metacritic.com' + product_title.find('a').attrib['href']

    # Release date and score are both optional in the markup; missing
    # elements surface as IndexError from the [0] lookups.
    try:
        release = result.find_class('release_date')[0]. \
            find_class('data')[0].text_content()
        release = re.sub(r'\s{2,}', ' ', release)
    except IndexError:
        release = None

    try:
        score = result.find_class('metascore_w')[0].text_content()
    except IndexError:
        score = None

    # \x02 toggles IRC bold around the score and release date.
    return '[{}] {} - \x02{}/100\x02, {} - {}'.format(plat.upper(), name, score or 'no score',
                                                      'release: \x02%s\x02' % release if release else 'unreleased',
                                                      link)
Ejemplo n.º 49
0
def weather(inp, chan='', nick='', reply=None, db=None, api_key=None):
    ".weather <location> [dontsave] | @<nick> -- gets weather data from Wunderground "\
            "http://wunderground.com/weather/api"

    # Without an API key we cannot query Wunderground at all.
    if not api_key:
        return None

    # this database is used by other plugins interested in user's locations,
    # like .near in tag.py
    db.execute(
        "create table if not exists location(chan, nick, loc, lat, lon, primary key(chan, nick))")

    # "@<nick>" looks up another user's saved location (never re-saved).
    if inp[0:1] == '@':
        nick = inp[1:].strip()
        loc = None
        dontsave = True
    else:
        loc = inp

        # trailing " dontsave" suppresses persisting this location
        dontsave = loc.endswith(" dontsave")
        if dontsave:
            loc = loc[:-9].strip().lower()

    if not loc:  # blank line
        loc = db.execute(
            "select loc from location where chan=? and nick=lower(?)",
            (chan, nick)).fetchone()
        if not loc:
            try:
                # grab from old-style weather database
                loc = db.execute("select loc from weather where nick=lower(?)",
                                 (nick,)).fetchone()
            except db.OperationalError:
                pass    # no such table
            if not loc:
                return weather.__doc__
        loc = loc[0]

    loc, _, state = loc.partition(', ')

    # Check to see if a lat, long pair is being passed. This could be done more
    # completely with regex, and converting from DMS to decimal degrees. This
    # is nice and simple, however.
    try:
        float(loc)
        float(state)

        loc = loc + ',' + state
        state = ''
    except ValueError:
        if state:
            state = http.quote_plus(state)
            state += '/'

        loc = http.quote_plus(loc)

    url = 'http://api.wunderground.com/api/'
    query = '{key}/geolookup/conditions/forecast/q/{state}{loc}.json' \
            .format(key=api_key, state=state, loc=loc)
    url += query

    try:
        parsed_json = http.get_json(url)
    except IOError:
        return 'Could not get data from Wunderground'

    info = {}
    if 'current_observation' not in parsed_json:
        resp = 'Could not find weather for {inp}. '.format(inp=inp)

        # In the case of no observation, but results, print some possible
        # location matches
        if 'results' in parsed_json['response']:
            resp += 'Possible matches include: '
            results = parsed_json['response']['results']

            for place in results[:6]:
                resp += '{city}, '.format(**place)

                if place['state']:
                    resp += '{state}, '.format(**place)

                if place['country_name']:
                    resp += '{country_name}; '.format(**place)

            # drop the trailing separator left by the loop above
            resp = resp[:-2]

        reply(resp)
        return

    obs = parsed_json['current_observation']
    sf = parsed_json['forecast']['simpleforecast']['forecastday'][0]
    info['city'] = obs['display_location']['full']
    info['t_f'] = obs['temp_f']
    info['t_c'] = obs['temp_c']
    info['weather'] = obs['weather']
    info['h_f'] = sf['high']['fahrenheit']
    info['h_c'] = sf['high']['celsius']
    info['l_f'] = sf['low']['fahrenheit']
    info['l_c'] = sf['low']['celsius']
    info['humid'] = obs['relative_humidity']
    info['wind'] = 'Wind: {mph}mph/{kph}kph' \
        .format(mph=obs['wind_mph'], kph=obs['wind_kph'])
    reply('{city}: {weather}, {t_f}F/{t_c}C'
          '(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)'
          ', Humidity: {humid}, {wind}'.format(**info))

    # also store the geocoded coordinates for other plugins (e.g. .near)
    lat = float(obs['display_location']['latitude'])
    lon = float(obs['display_location']['longitude'])

    if inp and not dontsave:
        db.execute("insert or replace into location(chan, nick, loc, lat, lon) "
                   "values (?, ?, ?, ?,?)",        (chan, nick.lower(), inp, lat, lon))
        db.commit()
Ejemplo n.º 50
0
def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
    """weather <location> [dontsave] -- Gets weather data
    for <location> from Wunderground."""

    api_key = bot.config.get("api_keys", {}).get("wunderground")

    if not api_key:
        return "Error: No wunderground API details."

    # initialise weather DB
    db.execute("create table if not exists weather(nick primary key, loc)")

    # if there is no input, try getting the users last location from the DB
    if not inp:
        location = db.execute(
            "select loc from weather where nick=lower(:nick)", {
                "nick": nick
            }).fetchone()
        if not location:
            # no location saved in the database, send the user help text
            notice(weather.__doc__)
            return
        loc = location[0]

        # no need to save a location, we already have it
        dontsave = True
    else:
        # see if the input ends with "dontsave"
        dontsave = inp.endswith(" dontsave")

        # remove "dontsave" from the input string after checking for it
        if dontsave:
            loc = inp[:-9].strip().lower()
        else:
            loc = inp

    location = http.quote_plus(loc)

    request_url = base_url.format(api_key, "geolookup/forecast/conditions",
                                  location)
    response = http.get_json(request_url)

    if 'location' not in response:
        # no exact match; retry with the zmw id of the closest search result
        try:
            location_id = response['response']['results'][0]['zmw']
        except KeyError:
            return "Could not get weather for that location."

        # get the weather again, using the closest match
        request_url = base_url.format(api_key, "geolookup/forecast/conditions",
                                      "zmw:" + location_id)
        response = http.get_json(request_url)

    # \x02 toggles IRC bold around the place-name parts
    if response['location']['state']:
        place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(
            response['location']['city'], response['location']['state'],
            response['location']['country'])
    else:
        place_name = "\x02{}\x02 (\x02{}\x02)".format(
            response['location']['city'], response['location']['country'])

    forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0]
    forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][
        1]

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place":
        place_name,
        "conditions":
        response['current_observation']['weather'],
        "temp_f":
        response['current_observation']['temp_f'],
        "temp_c":
        response['current_observation']['temp_c'],
        "humidity":
        response['current_observation']['relative_humidity'],
        "wind_kph":
        response['current_observation']['wind_kph'],
        "wind_mph":
        response['current_observation']['wind_mph'],
        "wind_direction":
        response['current_observation']['wind_dir'],
        "today_conditions":
        forecast_today['conditions'],
        "today_high_f":
        forecast_today['high']['fahrenheit'],
        "today_high_c":
        forecast_today['high']['celsius'],
        "today_low_f":
        forecast_today['low']['fahrenheit'],
        "today_low_c":
        forecast_today['low']['celsius'],
        "tomorrow_conditions":
        forecast_tomorrow['conditions'],
        "tomorrow_high_f":
        forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c":
        forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f":
        forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c":
        forecast_tomorrow['low']['celsius'],
        "url":
        web.isgd(response["current_observation"]['forecast_url'] +
                 "?apiref=e535207ff4757b18")
    }

    reply(
        "{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
        "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
        "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
        "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
        "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(
            **weather_data))

    # persist the location unless the user opted out with "dontsave"
    if location and not dontsave:
        db.execute(
            "insert or replace into weather(nick, loc) values (:nick, :loc)", {
                "nick": nick.lower(),
                "loc": loc
            })
        db.commit()
Ejemplo n.º 51
0
def lmgtfy(inp, say=''):
    """.lmgtfy [phrase] - Posts a Google link for the specified phrase."""
    # Build the URL-escaped link first, then hand it to the bot's output hook.
    link = "http://lmgtfy.com/?q={}".format(http.quote_plus(inp))
    say(link)
Ejemplo n.º 52
0
def weather(inp, nick="", server="", reply=None, db=None, api_key=None):
    ".weather <location> [dontsave] -- gets weather data from Wunderground " "http://wunderground.com/weather/api"

    if not api_key:
        return None

    loc = inp

    dontsave = loc.endswith(" dontsave")
    if dontsave:
        loc = loc[:-9].strip().lower()

    db.execute("create table if not exists weather(nick primary key, loc)")

    if not loc:  # blank line
        loc = db.execute("select loc from weather where nick=lower(?)", (nick,)).fetchone()
        if not loc:
            return weather.__doc__
        loc = loc[0]

    loc, _, state = loc.partition(", ")

    # Check to see if a lat, long pair is being passed. This could be done more
    # completely with regex, and converting from DMS to decimal degrees. This
    # is nice and simple, however.
    try:
        float(loc)
        float(state)

        loc = loc + "," + state
        state = ""
    except ValueError:
        if state:
            state = http.quote_plus(state)
            state += "/"

        loc = http.quote_plus(loc)

    url = "http://api.wunderground.com/api/"
    query = "{key}/geolookup/conditions/forecast/q/{state}{loc}.json".format(key=api_key, state=state, loc=loc)
    url += query

    try:
        parsed_json = http.get_json(url)
    except IOError:
        print "Could not get data from Wunderground"
        return None

    info = {}
    if "current_observation" not in parsed_json:
        resp = "Could not find weather for {inp}. ".format(inp=inp)

        # In the case of no observation, but results, print some possible
        # location matches
        if "results" in parsed_json["response"]:
            resp += "Possible matches include: "
            results = parsed_json["response"]["results"]

            for place in results[:6]:
                resp += "{city} ".format(**place)

                if place["state"]:
                    resp += "{state} ".format(**place)

                if place["country_name"]:
                    resp += "{country_name}, ".format(**place)

            resp = resp[:-2]

        reply(resp)
        return

    obs = parsed_json["current_observation"]
    sf = parsed_json["forecast"]["simpleforecast"]["forecastday"][0]
    info["city"] = obs["display_location"]["full"]
    info["t_f"] = obs["temp_f"]
    info["t_c"] = obs["temp_c"]
    info["weather"] = obs["weather"]
    info["h_f"] = sf["high"]["fahrenheit"]
    info["h_c"] = sf["high"]["celsius"]
    info["l_f"] = sf["low"]["fahrenheit"]
    info["l_c"] = sf["low"]["celsius"]
    info["humid"] = obs["relative_humidity"]
    info["wind"] = "Wind: {mph}mph/{kph}kph".format(mph=obs["wind_mph"], kph=obs["wind_kph"])
    reply(
        "{city}: {weather}, {t_f}F/{t_c}C"
        "(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)"
        ", Humidity: {humid}, {wind}".format(**info)
    )

    if inp and not dontsave:
        db.execute("insert or replace into weather(nick, loc) values (?,?)", (nick.lower(), inp))
        db.commit()
Ejemplo n.º 53
0
def metacritic(inp):
    '.mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title> -- gets rating for'\
    ' <title> from metacritic on the specified medium'

    # if the results suck, it's metacritic's fault

    args = inp.strip()

    game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii', 'vita',
                      'wiiu', 'xone', 'ps4')
    all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')

    # Peel an optional leading platform token off the input; a single-word
    # input or an unrecognised token falls through to an "all" search.
    try:
        plat, title = args.split(' ', 1)
        if plat not in all_platforms:
            # raise the ValueError so that the except block catches it
            # in this case, or in the case of the .split above raising the
            # ValueError, we want the same thing to happen
            raise ValueError
    except ValueError:
        plat = 'all'
        title = args

    # Metacritic groups every game platform under the "game" search category.
    cat = 'game' if plat in game_platforms else plat

    title_safe = http.quote_plus(title)

    url = 'http://www.metacritic.com/search/%s/%s/results' % (cat, title_safe)

    # NOTE: removed a leftover debug print(url) from here.
    try:
        doc = http.get_html(url)
    except HTTPError:
        return 'error fetching results'

    # get the proper result element we want to pull data from

    result = None

    if not doc.find_class('query_results'):
        return 'no results found'

    # if they specified an invalid search term, the input box will be empty
    if doc.get_element_by_id('primary_search_box').value == '':
        return 'invalid search term'

    if plat not in game_platforms:
        # for [all] results, or non-game platforms, get the first result
        result = doc.find_class('result first_result')[0]

        # find the platform, if it exists
        result_type = result.find_class('result_type')
        if result_type:

            # if the result_type div has a platform div, get that one
            platform_div = result_type[0].find_class('platform')
            if platform_div:
                plat = platform_div[0].text_content().strip()
            else:
                # otherwise, use the result_type text_content
                plat = result_type[0].text_content().strip()

    else:
        # for games, we want to pull the first result with the correct
        # platform
        results = doc.find_class('result')
        for res in results:
            result_plat = res.find_class('platform')[0].text_content().strip()
            if result_plat == plat.upper():
                result = res
                break

    if not result:
        return 'no results found'

    # get the name, release date, and score from the result
    product_title_element = result.find_class('product_title')[0]

    review = {
        'platform':
        plat.upper(),
        'title':
        product_title_element.text_content().strip(),
        'link':
        'http://metacritic.com' +
        product_title_element.find('a').attrib['href']
    }

    # The score span is optional (unscored titles have none); missing
    # elements surface as IndexError from the [0] lookup.
    try:
        score_element = result.find_class('metascore_w')[0]

        review['score'] = score_element.text_content().strip()

        # map the metascore CSS classes to an IRC colour code
        review['score_color'] = get_score_color(score_element.classes)
    except IndexError:
        review['score'] = 'unknown'

    # \x02 starts IRC bold, \x0f resets formatting after the coloured score.
    return '[{platform}] {title} - \x02{score_color}{score}\x0f - {link}'.format(
        **review)
Ejemplo n.º 54
0
def map(inp, say=None):
    """map <place>|<origin to destination> - Gets a Map of place or route from Google Maps."""
    # Build the URL-escaped maps link, shorten it via googl, then send it.
    maps_url = "https://www.google.com/maps/?q={}".format(http.quote_plus(inp))
    say(web.try_googl(maps_url))