Example #1
def login(user, password):
    http.jar.clear_expired_cookies()
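    # skip the login request if both SA auth cookies are already present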
    if any(cookie.domain == 'forums.somethingawful.com' and
           cookie.name == 'bbuserid' for cookie in http.jar):
        if any(cookie.domain == 'forums.somethingawful.com' and
               cookie.name == 'bbpassword' for cookie in http.jar):
            return
        assert False, "malformed cookie jar"
    user = http.quote(user)
    password = http.quote(password)
    http.get("http://forums.somethingawful.com/account.php", cookies=True,
        post_data="action=login&username=%s&password=%s" % (user, password))
Example #2
def check_touhou(inp,chan=None,bot=None):
    #if channel[chan]:

    channels = bot.channelconfig.walk(gather_subsection)
    for channel in channels:
        print channel

    # NOTE: this early return leaves everything below unreachable (debug leftover)
    return

    chan_url = http.quote('{channel|%s}/1' % '#pantsumen') #str(chan)
    url='http://booru.touhouradio.com/post/list/%s' % chan_url

    try:
        html = http.get_html(url)
    except ValueError:
        return None

    firstimage = html.xpath("//span[@class='thumb']//img/@src")[0]

    try:
        if firstimage in touhou_list[chan]:
            return "New Activity on TouhouRadio!"
    except KeyError:
        pass
    
    touhou_list[chan] = firstimage
    print touhou_list[chan]
Example #3
def wiki(inp):
    """wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."""

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + x).text for x in ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:  # skip disambiguation pages
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub(r'\s+', ' ', desc).strip()  # remove excess spaces

    desc = text.truncate_str(desc, 200)

    return '{} :: {}'.format(desc, http.quote(url, ':/'))
Example #4
def wiki(inp):
    '''.w/.wiki <phrase> -- gets first sentence of wikipedia ''' \
        '''article on <phrase>'''

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'no results found'

    def extract(item):
        return [item.find(ns + x).text for x in
                ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub(r'\s+', ' ', desc).strip()  # remove excess spaces

    if len(desc) > 300:
        desc = desc[:300] + '...'

    return '%s -- %s' % (desc, http.quote(http.unquote(url), ':/'))
Example #5
def wiki(inp):
    "wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + x).text for x in
                            ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub(r'\s+', ' ', desc).strip()  # remove excess spaces

    desc = text.truncate_str(desc, 250)

    return '%s -- %s' % (desc, http.quote(url, ':/'))
Example #6
def oblique(inp, nick='', chan=''):
    '.o/.oblique <command> <args> -- runs <command> using oblique web' \
    ' services. see http://wiki.github.com/nslater/oblique/'

    update_commands()

    if ' ' in inp:
        command, args = inp.split(None, 1)
    else:
        command = inp
        args = ''

    command = command.lower()

    if command == 'refresh':
        update_commands(True)
        return '%d commands loaded.' % len(commands)
    if command in commands:
        url = commands[command]
        url = url.replace('${nick}', nick)
        url = url.replace('${sender}', chan)
        url = url.replace('${args}', http.quote(args.encode('utf8')))
        try:
            return http.get(url)
        except http.HTTPError, e:
            return "http error %d" % e.code
Example #7
def wiki(inp):
    '''.w/.wiki <phrase> -- gets first sentence of wikipedia ''' \
    '''article on <phrase>'''

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'no results found'

    def extract(item):
        return [item.find(ns + x).text for x in ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub(r'\s+', ' ', desc).strip()  # remove excess spaces

    if len(desc) > 300:
        desc = desc[:300] + '...'

    return '%s -- %s' % (desc, http.quote(url, ':/'))
Example #8
def tf(inp):
    """.tf/.hats <SteamID> -- Shows items waiting to be received in TF2."""

    if inp.isdigit():
        link = 'profiles'
    else:
        link = 'id'

    url = 'http://steamcommunity.com/%s/%s/tfitems?json=1' % \
        (link, http.quote(inp.encode('utf8'), safe=''))

    try:
        inv = http.get_json(url)
    except ValueError:
        return '%s is not a valid profile' % inp

    dropped, dhats, hats = 0, 0, 0
    for item, data in inv.iteritems():
        ind = int(data['defindex'])
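        # defindex ranges 47-55, 94-126 and 134-152 identify hats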
        if data['inventory'] == 0:
            if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
                dhats += 1
            else:
                dropped += 1
        else:
            if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
                hats += 1

    return '%s has had %s items and %s hats drop (%s total hats)' %  \
        (inp, dropped, dhats, dhats + hats)
Example #9
def kb(inp):
    """kb <topic> -- Gets the first article available on <topic>."""

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + x).text for x in
                ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = u' '.join(desc.split())  # remove excess spaces

    desc = text.truncate_str(desc, 200)

    return u'{} :: {}'.format(desc, http.quote(url, ':/'))
Example #10
File: geoip.py Project: AlumCC/Gary
def geoip(inp, api_key=None):
    """geoip <IP address> - Gets the location of an IP address."""
    url = "http://api.ipapi.com/%s" % (http.quote(inp.encode('utf8'), safe=''))

    try:
        data = http.get_json(url, access_key=api_key)
    except Exception:
        return "I couldn't find %s" % inp

    return fformat(data).replace('in United', 'in the United')
Example #11
File: tf2.py Project: Cameri/Gary
def hats(inp, api_key=None):
    """.hats <Steam Vanity URL|Numeric Steam ID> - Shows backpack information for TF2."""

    # Get SteamID
    if inp.isdigit():
        steamid64 = inp
    else:
        try:
            id_url = 'http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key=%s&vanityurl=%s' % \
                (api_key, http.quote(inp.encode('utf8'), safe=''))
            steamid64 = http.get_json(id_url)['response']['steamid']
        except Exception:
            return "Error getting numeric Steam ID, please try format '.hats <Numeric Steam ID>'"

    # Get Steam User's TF2 Inventory/Check for User
    try:
        inv_url = 'http://api.steampowered.com/IEconItems_440/GetPlayerItems/v0001/?SteamID=%s&key=%s' % \
            (steamid64, api_key)
        inv = http.get_json(inv_url)
    except Exception:
        return "Sorry, I couldn't find '%s''s Steam inventory." % inp

    # Count Items into Categories
    total, dropped, dhats, dun, un, hats = 0, 0, 0, 0, 0, 0
    for x in inv["result"]["items"]:
        total += 1
        ind = int(x['defindex'])
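        # origin 0 marks items that came from drops; the defindex ranges below identify hats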
        if x['origin'] == 0:
            if x['quality'] == 5:
                dun += 1
            if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
                dhats += 1
            else:
                dropped += 1
        else:
            if x['quality'] == 5:
                un += 1
            if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
                hats += 1

    # Get Market Price for Backpack
    try:
        backpack_url = 'http://backpack.tf/api/IGetUsers/v3/?steamids=%s' % steamid64
        backpack = http.get_json(backpack_url)
        ref = backpack['response']['players'][steamid64]['backpack_value']['440']
    except Exception:
        ref = '???'

    return '%s has %s items, %s hats, and %s unusuals (%s/%s/%s of the ' \
        'items/hats/unusuals were from drops) and has a backpack worth %s ref' % \
        (inp, total, hats + dhats, un + dun, dropped, dhats, dun, ref)
Example #12
File: steamcalc.py Project: Cameri/Gary
def steamcalc(inp, say='', api_key=None):
    """.steamcalc <Steam Vanity URL ID> - Gets Steam account value for a given Vanity ID. Uses steamcommunity.com/id/<nickname>."""
    # Get SteamID
    try:
        steamid = http.get_json(user_url % (api_key, http.quote(inp.encode('utf8'), safe='')))['response']['steamid']
    except Exception:
        return "'%s' does not appear to be a valid Vanity ID. Uses steamcommunity.com/id/<VanityID>." % inp

    # Get Steam profile info
    try:
        profile = http.get_json(profile_url % (api_key, steamid))['response']['players'][0]
        persona = profile['personaname']
    except Exception:
        return "Error looking up %s's Steam profile." % inp

    # Get Steam account games for User
    try:
        account = http.get_json(account_url % (api_key, steamid))['response']['games']
        games = [str(item['appid']) for item in account]
    except Exception:
        return "Error looking up %s's Steam inventory." % inp

    # Get info for games
    say("Collecting data for %s, please wait..." % inp)
    games_info = {}
    try:
        while games:
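            # query the store API in batches of 20 app ids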
            games_temp, games = games[:20], games[20:]
            gurl = games_url % ','.join(games_temp)
            games_info.update(http.get_json(gurl))
    except Exception:
        return "Error looking up game data, please try again later."

    # Aggregate Steam data
    prices = []
    scores = []
    for game in games_info:
        try:
            prices.append(games_info[game]['data']['price_overview']['final'])
            scores.append(games_info[game]['data']['metacritic']['score'])
        except Exception:
            pass  # skip games without price or metacritic data

    prices = [int(price) / 100. for price in prices]
    scores = [float(score) for score in scores]

    total_price = "{0:.2f}".format(sum(prices))
    avg_score = "{0:.1f}".format(sum(scores) / len(scores)) if scores else "N/A"

    say("{} has {} games with a total value of ${} and an average metascore of {}".format(persona, len(games_info), total_price, avg_score))
Example #13
def get_series_info(token, seriesname):
    head = {'Authorization': 'Bearer ' + token}

    params = {'name': http.quote(seriesname)}
    series = http.get_json(search_url, headers=head,
                           query_params=params)['data']
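    # keep only results that have a network set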
    _series = [s for s in series if s['network'] is not None]

    if _series:
        seriesid = _series[0]['id']
    else:
        seriesid = series[0]['id']

    return http.get_json(series_url.format(seriesid), headers=head)['data']
Example #14
def drama(inp):
    ".drama <phrase> -- Gets the first paragraph of the Encyclopedia Dramatica article on <phrase>."

    j = http.get_json(api_url, search=inp)
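    # opensearch result: j[1] holds the list of matching article titles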
    if not j[1]:
        return "no results found"
    article_name = j[1][0].replace(" ", "_").encode("utf8")

    url = ed_url + http.quote(article_name, "")
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            if len(summary) > 300:
                summary = summary[: summary.rfind(" ", 0, 300)] + "..."
            return "%s :: \x02%s\x02" % (summary, url)

    return "error"
Example #15
def get_validation_results(url):
    document = http.get_html(VALIDATOR_URL, doc=url)

    results = document.xpath('//div[@id="results"]/ol')

    if not results:
        return None

    results = results[0]

    # use relative paths so the query stays scoped to the results list
    warnings = results.xpath('.//li[contains(@class, "warning")]')
    errors = results.xpath('.//li[contains(@class, "error")]')

    return {
        'warning_count': len(warnings),
        'error_count': len(errors),
        'url': VALIDATOR_URL + '?doc=' + http.quote(url),
        'status': 'valid' if not errors else 'invalid'
    }
Example #16
def drama(inp):
    "drama <phrase> -- Gets the first paragraph of" \
    " the Encyclopedia Dramatica article on <phrase>."

    j = http.get_json(api_url, search=inp)
    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = ed_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            if len(summary) > 300:
                summary = summary[:summary.rfind(' ', 0, 300)] + "..."
            return "%s :: \x02%s\x02" % (summary, url)

    return "Unknown Error."
Example #17
def drama(inp):
    '''.drama <phrase> -- gets first paragraph of Encyclopedia Dramatica ''' \
    '''article on <phrase>'''

    j = http.get_json(api_url, search=inp)
    if not j[1]:
        return 'no results found'
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = ed_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = ' '.join(p.text_content().splitlines())
            if len(summary) > 300:
                summary = summary[:summary.rfind(' ', 0, 300)] + "..."
            return '%s :: \x02%s\x02' % (summary, url)

    return "error"
Example #18
def mcwiki(text):
    """mcwiki <phrase> -- Gets the first paragraph of
    the Minecraft Wiki article on <phrase>."""

    try:
        j = http.get_json(api_url, search=text)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')

    try:
        page = http.get_html(url)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching wiki page: {}".format(e)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = formatting.truncate_str(summary, 200)
            return "{} :: {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
Example #19
def drama(inp):
    """drama <phrase> -- Gets the first paragraph of
    the Encyclopedia Dramatica article on <phrase>."""

    j = http.get_json(api_url, search=inp)

    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = ed_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 220)
            return "{} :: {}".format(summary, url)

    return "Unknown Error."
Example #20
def mcwiki(inp):
    """mcwiki <phrase> -- Gets the first paragraph of
    the Minecraft Wiki article on <phrase>."""

    j = http.get_json(api_url, search=inp)

    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 200)
            return "{} :: {}".format(summary, url)

    return "Unknown Error."
Example #21
File: dictionary.py Project: Cameri/Gary
def drama(inp):
    """.drama <phrase> - gets first paragraph of Encyclopedia Dramatica article on <phrase>; note: use proper capitalization, e.g. 'Ron Paul'."""
    try:
        j = http.get_json(api_url, search=inp)
    except Exception:
        return "Error parsing Encyclopedia Dramatica API, please try again in a few minutes"
    if not j[1]:
        return 'no results found'
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = ed_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = ' '.join(p.text_content().splitlines())
            if len(summary) > 300:
                summary = summary[:summary.rfind(' ', 0, 300)] + "..."
            return '%s :: \x02%s\x02' % (summary, url)

    return "Error"
Example #22
def mcwiki(inp):
    "mcwiki <phrase> -- Gets the first paragraph of" \
    " the Minecraft Wiki article on <phrase>."

    j = http.get_json(api_url, search=inp)

    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 250)
            return "%s :: \x02%s\x02" % (summary, url)

    return "Unknown Error."
Example #23
def weather(inp, chan='', nick='', reply=None, db=None, api_key=None):
    ".weather <location> [dontsave] | @<nick> -- gets weather data from " \
        "Wunderground http://wunderground.com/weather/api"

    if not api_key:
        return None

    # this database is used by other plugins interested in user's locations,
    # like .near in tag.py
    db.execute("create table if not exists "
               "location(chan, nick, loc, lat, lon, primary key(chan, nick))")

    if inp[0:1] == '@':
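        # "@<nick>" looks up that user's saved location and never saves anything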
        nick = inp[1:].strip()
        loc = None
        dontsave = True
    else:
        dontsave = inp.endswith(" dontsave")
        # strip off the " dontsave" text if it exists and set it back to `inp`
        # so we don't report it back to the user incorrectly
        if dontsave:
            inp = inp[:-9].strip().lower()
        loc = inp

    if not loc:  # no location given; fall back to a saved one
        loc = db.execute(
            "select loc from location where chan=? and nick=lower(?)",
            (chan, nick)).fetchone()
        if not loc:
            try:
                # grab from old-style weather database
                loc = db.execute("select loc from weather where nick=lower(?)",
                                 (nick,)).fetchone()
            except db.OperationalError:
                pass    # no such table
            if not loc:
                return weather.__doc__
        loc = loc[0]

    params = [http.quote(p.strip()) for p in loc.split(',')]

    loc = params[0]
    state = ''

    # Try to interpret the query based on the number of comma-separated parts.
    # Two parts might be city-state, city-country, or a lat,long pair.
    if len(params) == 2:

        state = params[1]

        # Check to see if a lat, long pair is being passed. This could be done
        # more completely with regex, and converting from DMS to decimal
        # degrees. This is nice and simple, however.
        try:
            float(loc)
            float(state)

            loc = loc + ',' + state
            state = ''
        except ValueError:
            state += '/'

    # Assume three parts form a city-state-country triplet. Discard the state
    # portion because that's what the API expects.
    elif len(params) == 3:
        loc = params[0]
        state = params[2] + '/'

    url = 'http://api.wunderground.com/api/'
    query = '{key}/geolookup/conditions/forecast/q/{state}{loc}.json' \
            .format(key=api_key, state=state, loc=loc)
    url += query

    try:
        parsed_json = http.get_json(url)
    except IOError:
        return 'Could not get data from Wunderground'

    info = {}
    if 'current_observation' not in parsed_json:
        resp = 'Could not find weather for {inp}. '.format(inp=inp)

        # In the case of no observation, but results, print some possible
        # location matches
        if 'results' in parsed_json['response']:
            resp += 'Possible matches include: '
            results = parsed_json['response']['results']

            for place in results[:6]:
                resp += '{city}, '.format(**place)

                if place['state']:
                    resp += '{state}, '.format(**place)

                if place['country_name']:
                    resp += '{country_name}; '.format(**place)

            resp = resp[:-2]

        reply(resp)
        return

    obs = parsed_json['current_observation']
    sf = parsed_json['forecast']['simpleforecast']['forecastday'][0]
    info['city'] = obs['display_location']['full']
    info['t_f'] = obs['temp_f']
    info['t_c'] = obs['temp_c']
    info['weather'] = obs['weather']
    info['h_f'] = sf['high']['fahrenheit']
    info['h_c'] = sf['high']['celsius']
    info['l_f'] = sf['low']['fahrenheit']
    info['l_c'] = sf['low']['celsius']
    info['humid'] = obs['relative_humidity']
    info['wind'] = 'Wind: {mph}mph/{kph}kph' \
        .format(mph=obs['wind_mph'], kph=obs['wind_kph'])
    reply('{city}: {weather}, {t_f}F/{t_c}C'
          '(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)'
          ', Humidity: {humid}, {wind}'.format(**info))

    lat = float(obs['display_location']['latitude'])
    lon = float(obs['display_location']['longitude'])

    if inp and not dontsave:
        db.execute("insert or replace into "
                   "location(chan, nick, loc, lat, lon) "
                   "values (?, ?, ?, ?, ?)",
                   (chan, nick.lower(), inp, lat, lon))
        db.commit()