Example #1
 def _query_freebase(self, work_type, thing):
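   # Two-step lookup against the legacy api.freebase.com services: a search
   # call resolves the name to a Freebase ID, then an MQL read fetches the
   # typed record (name plus the per-type subquery fields).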
   props = FREEBASE_TYPES[work_type]
   url = "https://api.freebase.com/api/service/search?query=%s&type=%s" % (web.urlquote(thing),props['type'])
   response = simplejson.loads(web.getUrl(url, headers=HEADERS))
   if len(response['result']) == 0:
     return None
   else:
     fbid = response['result'][0]['id']
     query = {
       'escape': False,
       'query': {
         "id": fbid,
         "type": props['type'],
         "name": None,
         "limit": 1
       }
     }
     query['query'].update(props['subquery'])
     url = "https://api.freebase.com/api/service/mqlread?query=%s" % web.urlquote(simplejson.dumps(query))
     response = simplejson.loads(web.getUrl(url, headers=HEADERS))
     result = response['result']
     if result is None:
       return None
     else:
       return({
         'props': props,
         'url': "http://www.freebase.com" + result['id'],
         'title': result['name'],
         'characters': props['extractor'](result)
       })
Example #2
 def _query_freebase(self, work_type, thing):
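   # Same two-step flow, but against the Freebase v1 endpoints on
   # googleapis.com, which require an API key (read here from the bot config).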
   key = conf.get(conf.supybot.plugins.Cast.FreebaseApiKey)
   props = FREEBASE_TYPES[work_type]
   url = "https://www.googleapis.com/freebase/v1/search?query=%s&type=%s&key=%s" % (web.urlquote(thing),props['type'],key)
   response = simplejson.loads(web.getUrl(url, headers=HEADERS))
   if len(response['result']) == 0:
     return None
   else:
     fbid = response['result'][0]['id']
     query = {
         "id": fbid,
         "type": props['type'],
         "name": None,
         "limit": 1
       }
     query.update(props['subquery'])
     url = "https://www.googleapis.com/freebase/v1/mqlread?query=%s&key=%s" % (web.urlquote(simplejson.dumps(query)),key)
     response = simplejson.loads(web.getUrl(url, headers=HEADERS))
     result = response['result']
     if result is None:
       return None
     else:
       return({
         'props': props,
         'url': "http://www.freebase.com" + result['id'],
         'title': result['name'],
         'characters': props['extractor'](result)
       })
Example #3
 def _query_freebase(self, work_type, thing):
     props = FREEBASE_TYPES[work_type]
     url = "https://api.freebase.com/api/service/search?query=%s&type=%s" % (
         web.urlquote(thing), props['type'])
     response = simplejson.loads(web.getUrl(url, headers=HEADERS))
     if len(response['result']) == 0:
         return None
     else:
         fbid = response['result'][0]['id']
         query = {
             'escape': False,
             'query': {
                 "id": fbid,
                 "type": props['type'],
                 "name": None,
                 "limit": 1
             }
         }
         query['query'].update(props['subquery'])
         url = "https://api.freebase.com/api/service/mqlread?query=%s" % web.urlquote(
             simplejson.dumps(query))
         response = simplejson.loads(web.getUrl(url, headers=HEADERS))
         result = response['result']
         if result is None:
             return None
         else:
             return ({
                 'props': props,
                 'url': "http://www.freebase.com" + result['id'],
                 'title': result['name'],
                 'characters': props['extractor'](result)
             })
Example #4
 def _query_freebase(self, work_type, thing):
     key = conf.get(conf.supybot.plugins.Cast.FreebaseApiKey)
     props = FREEBASE_TYPES[work_type]
     url = "https://www.googleapis.com/freebase/v1/search?query=%s&type=%s&key=%s" % (
         web.urlquote(thing), props['type'], key)
     response = simplejson.loads(web.getUrl(url, headers=HEADERS))
     if len(response['result']) == 0:
         return None
     else:
         fbid = response['result'][0]['id']
         query = {
             "id": fbid,
             "type": props['type'],
             "name": None,
             "limit": 1
         }
         query.update(props['subquery'])
         url = "https://www.googleapis.com/freebase/v1/mqlread?query=%s&key=%s" % (
             web.urlquote(simplejson.dumps(query)), key)
         response = simplejson.loads(web.getUrl(url, headers=HEADERS))
         result = response['result']
         if result is None:
             return None
         else:
             return ({
                 'props': props,
                 'url': "http://www.freebase.com" + result['id'],
                 'title': result['name'],
                 'characters': props['extractor'](result)
             })
Example #5
 def loadByName(self, name):
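     # A numeric argument is already an ID; otherwise scrape the first <id>
     # element out of the citizen-search XML and load by that ID.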
     if name.isdigit():
         return self.loadById(name)
     import supybot.utils.web as web
     lines = filter(lambda x: '<id>' in x, web.getUrl('http://api.erpk.org/citizen/search/' + name + '/1.xml?key=nIKh0F7U').split('\n'))
     if not lines:
         return None
     line = lines[0]
     id = line.split('>')[1].split('<')[0]
     return self.loadById(id)
Example #6
    def urbandict(self, irc, msg, args, opts, words):
        """<phrase>

        Returns the definition and usage of <phrase> from UrbanDictionary.com.
        """
        use_definition = None
        for (opt,arg) in opts:
          if opt == 'def':
            use_definition = int(arg)
        terms = ' '.join(words)
        url = 'http://www.urbandictionary.com/define.php?term=%s' \
            % web.urlquote(terms)
        html = web.getUrl(url)
        doc = fromstring(html)
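        # UrbanDictionary flags unknown terms with a "not_defined_yet" div;
        # otherwise each definition div may be followed by a sibling example div.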
        if len(doc.xpath('//div[@id="not_defined_yet"]')):
            irc.error('No definition found.', Raise=True)
        definitions = []
        for div in doc.xpath('//div[@class="definition"]'):
            text = div.text_content()
            if div.getnext().tag == 'div' \
            and div.getnext().attrib.get('class', None) == 'example':
                text += ' [example] ' + div.getnext().text_content() + ' [/example] '
            text = re.sub(r'[\\\r\\\n]+', ' ', text)
            definitions.append(text)
        if use_definition != None:
          definitions = [definitions[use_definition-1]]
        reply_msg = '%s: %s' % (terms, '; '.join(definitions))
        irc.reply(reply_msg.encode('utf8'))
Example #7
 def _fetch_json(self, url):
     doc = web.getUrl(url, headers=HEADERS)
     try:
         json = simplejson.loads(doc)
     except ValueError:
         return None
     return json
Example #8
    def trends(self, irc, msg, args, timeframe):
        """@trends [current|daily|weekly]

        Return top trending Twitter topics for one of three timeframes:
        current, daily or weekly. Default is current.
        """

        if not timeframe:
            timeframe = "current"
        if timeframe not in ["current", "daily", "weekly"]:
            irc.reply("Invalid timeframe. Must be one of 'current', 'daily' or 'weekly'")
            return

        url = "http://search.twitter.com/trends/%s.json" % timeframe
        try:
            doc = web.getUrl(url, headers=HEADERS)
            json = simplejson.loads(doc)
        except:
            irc.reply("uh-oh, something went awry")
            return

        trends = json["trends"].values()[0]
        tnames = [x["name"] for x in trends]
        resp = ", ".join(["%d. %s" % t for t in zip(range(1, len(tnames) + 1), tnames)])
        irc.reply(resp.encode("utf8", "ignore").replace("\n", " ").strip(" "))
Example #9
 def _fetch_xml(self, function, query):
     url = "http://api.wunderground.com/auto/wui/geo/%sXML/index.xml?%s" % (
         function, urlencode({'query': query}))
     print url
     doc = web.getUrl(url, headers=HEADERS)
     # Wunderground double-encodes some of its entities, so we'll double-decode.
     return BSS(doc, convertEntities=BSS.HTML_ENTITIES)
Example #10
    def urbandict(self, irc, msg, args, opts, words):
        """<phrase>

        Returns the definition and usage of <phrase> from UrbanDictionary.com.
        """
        use_definition = None
        for (opt, arg) in opts:
            if opt == 'def':
                use_definition = int(arg)
        terms = ' '.join(words)
        url = 'http://www.urbandictionary.com/define.php?term=%s' \
            % web.urlquote(terms)
        html = web.getUrl(url)
        doc = fromstring(html)
        if len(doc.xpath('//div[@id="not_defined_yet"]')):
            irc.error('No definition found.', Raise=True)
        definitions = []
        for div in doc.xpath('//div[@class="definition"]'):
            text = div.text_content()
            if div.getnext().tag == 'div' \
            and div.getnext().attrib.get('class', None) == 'example':
                text += ' [example] ' + div.getnext().text_content(
                ) + ' [/example] '
            text = re.sub(r'[\\\r\\\n]+', ' ', text)
            definitions.append(text)
        if use_definition != None:
            definitions = [definitions[use_definition - 1]]
        reply_msg = '%s: %s' % (terms, '; '.join(definitions))
        irc.reply(reply_msg.encode('utf8'))
Example #11
    def trends(self, irc, msg, args, timeframe):
        """@trends [current|daily|weekly]

        Return top trending Twitter topics for one of three timeframes:
        current, daily or weekly. Default is current.
        """

        if not timeframe:
            timeframe = 'current'
        if timeframe not in ['current', 'daily', 'weekly']:
            irc.reply(
                "Invalid timeframe. Must be one of 'current', 'daily' or 'weekly'"
            )
            return

        url = 'http://search.twitter.com/trends/%s.json' % timeframe
        try:
            doc = web.getUrl(url, headers=HEADERS)
            json = simplejson.loads(doc)
        except:
            irc.reply("uh-oh, something went awry")
            return

        trends = json['trends'].values()[0]
        tnames = [x['name'] for x in trends]
        resp = ', '.join(
            ["%d. %s" % t for t in zip(range(1,
                                             len(tnames) + 1), tnames)])
        irc.reply(resp.encode('utf8', 'ignore').replace('\n', ' ').strip(' '))
Example #12
 def _chefjivevalleypig(self, irc, type, s):
     params = urlencode(dict(input=s,type=type))
     url = 'http://www.cs.utexas.edu/users/jbc/bork/bork.cgi?' + params
     resp = web.getUrl(url, headers=HEADERS)
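     # The dialect filters mangle some ampersand entities ("&ampway;",
     # "&emp;"); restore them to "&amp;" before decoding HTML entities.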
     resp = re.sub('&(ampway|emp);','&amp;',resp)
     resp = BS.BeautifulStoneSoup(resp,convertEntities=BS.BeautifulStoneSoup.HTML_ENTITIES).contents[0]
     resp = re.sub('\n', ' ', resp)
     irc.reply(resp.encode('utf-8', 'ignore').strip())
Example #13
 def _query_tmdb(self, cmd, args):
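   # TMDb 2.1-style URL layout: /<method>/<language>/json/<api key>/<args>;
   # TMDBK is the plugin's API key constant defined elsewhere.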
   url = "http://api.themoviedb.org/2.1/%s/en/json/%s/%s" % (cmd,TMDBK,urllib.quote(str(args)))
   doc = web.getUrl(url, headers=HEADERS)
   try:
     json = simplejson.loads(doc)
   except ValueError:
     return None
   return json
Example #14
 def _query_tmdb(self, cmd, args):
     url = "http://api.themoviedb.org/2.1/%s/en/json/%s/%s" % (
         cmd, TMDBK, urllib.quote(str(args)))
     doc = web.getUrl(url, headers=HEADERS)
     try:
         json = simplejson.loads(doc)
     except ValueError:
         return None
     return json
Example #15
 def _yelp_api(self, params):
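     # Yelp's old v1 review search, keyed by the module-level YWSID; a JSON
     # decode failure is treated the same as no result.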
     p = params.copy()
     p['ywsid'] = YWSID
     url = 'http://api.yelp.com/business_review_search?' + urlencode(p)
     doc = web.getUrl(url, headers=HEADERS)
     try:
         json = simplejson.loads(doc)
     except ValueError:
         return None
     return json
Example #16
 def _yelp_api(self, params):
     p = params.copy()
     p["ywsid"] = YWSID
     url = "http://api.yelp.com/business_review_search?" + urlencode(p)
     doc = web.getUrl(url, headers=HEADERS)
     try:
         json = simplejson.loads(doc)
     except ValueError:
         return None
     return json
Example #17
 def drunk(self, irc, msg, s):
     params = urlencode(dict(text=s, voice='drunk'))
     url = 'http://www.thevoicesofmany.com/text.php?' + params
     resp = web.getUrl(url, headers=HEADERS)
     soup = BS.BeautifulSoup(resp)
     try:
         translated = soup.find('td', id='top').blockquote.string
     except:
         irc.reply("oops, didn't work")
         return
     irc.reply(translated.encode('utf-8', 'ignore').strip())
Example #18
 def drunk(self, irc, msg, s):
     params = urlencode(dict(text=s, voice="drunk"))
     url = "http://www.thevoicesofmany.com/text.php?" + params
     resp = web.getUrl(url, headers=HEADERS)
     soup = BS.BeautifulSoup(resp)
     try:
         translated = soup.find("td", id="top").blockquote.string
     except:
         irc.reply("oops, didn't work")
         return
     irc.reply(translated.encode("utf-8", "ignore").strip())
Example #19
 def _chefjivevalleypig(self, irc, type, s):
     params = urlencode(dict(input=s, type=type))
     url = 'http://www.cs.utexas.edu/users/jbc/bork/bork.cgi?' + params
     resp = web.getUrl(url, headers=HEADERS)
     resp = re.sub('&(ampway|emp);', '&amp;', resp)
     resp = BS.BeautifulStoneSoup(
         resp,
         convertEntities=BS.BeautifulStoneSoup.HTML_ENTITIES).contents[0]
     resp = re.sub('\n', ' ', resp)
     irc.reply(resp.encode('utf-8', 'ignore').strip())
Example #20
 def _search(self, term):
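     # Queries what looks like a DBpedia Lookup-style service (SERVICE_URL and
     # the namespace-aware self._xpath helper are defined elsewhere) and pulls
     # the label, category, and URI out of each Result element.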
     xml = web.getUrl(SERVICE_URL % urlencode({"QueryString": term}), headers=HEADERS)
     parser = etree.XMLParser(ns_clean=True, remove_blank_text=True)
     tree = etree.parse(StringIO(xml), parser)
     results = []
     for r in self._xpath(tree, "//ns:Result"):
         label = self._xpath(r, "ns:Label/text()", 0)
         uri = self._xpath(r, "ns:URI/text()", 0)
         category = self._xpath(r, "ns:Categories/ns:Category/ns:Label/text()", 0)
         results.append((label, category, uri))
     return results
Example #21
def lyricsmania_urls(artist, title):
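    # Builds the song page URL from the normalized artist/title, bails out if
    # the site reports the song is missing, and returns both the page URL and
    # its printer-friendly variant.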
    title_norm = normalize(title)
    artist_norm = normalize(artist)
    url = 'http://www.lyricsmania.com/%s_lyrics_%s.html' % \
        (title_norm, artist_norm)
    logger.info("Fetching %s" % url)
    html = web.getUrl(url, headers=HEADERS)
    if html.find('not in our archive') != -1:
        raise LyricsNotFound
    doc = fromstring(html)
    link = doc.xpath('//a[starts-with(@href, "/print")]')[0]
    return (url, 'http://www.lyricsmania.com/%s' % link.attrib['href'])
Example #22
 def _getJsonResponse(self,url,retries = 2):
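     # Fetch and decode JSON, retrying recursively on web.Error up to
     # `retries` times; once retries run out it falls through and returns None.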
     try:
         log.debug('Retrieving: %s' % (url))
         doc = web.getUrl(url, headers=HEADERS)
         log.debug('Response: %s' % (doc))
         response = simplejson.loads(doc)
         return response
     except web.Error, e:
         log.warning('Error: %s',str(e))
         if retries > 0:
             log.warning('Retries left: %d' % (retries))
             return self._getJsonResponse(url,retries=retries-1)
Example #23
 def _getJsonResponse(self, url, retries=2):
     try:
         log.debug('Retrieving: %s' % (url))
         doc = web.getUrl(url, headers=HEADERS)
         log.debug('Response: %s' % (doc))
         response = simplejson.loads(doc)
         return response
     except web.Error, e:
         log.warning('Error: %s', str(e))
         if retries > 0:
             log.warning('Retries left: %d' % (retries))
             return self._getJsonResponse(url, retries=retries - 1)
Example #24
def lyricsmania(artist, title):
    try:
        (ref_url, print_url) = lyricsmania_urls(artist, title)
        logger.info("Fetching %s" % print_url)
        headers = HEADERS.copy()
        headers['Referer'] = ref_url
        html = web.getUrl(print_url, headers=headers)
        doc = fromstring(html)
        lyrics = doc.xpath('//div[@id="printprintx"]')[0]
        return {
            'artist': artist,
            'song': title,
            'lyrics': lyrics.text_content()
        }
    except LyricsNotFound:
        return None
Example #25
    def untiny(self, irc, msg, args, url):
        """<url>

        Return the whole URL for a tiny URL."""
        data = json.loads(getUrl(self.registryValue('service') % url).decode())
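        # The configured untiny-style service answers with {'org_url': ...} on
        # success or {'error': [code, message]} on failure.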
        if 'org_url' in data:
            irc.reply(data['org_url'])
        elif 'error' in data:
            num, msg = data['error']
            messages = {
                    '0': _('Invalid URL'),
                    '1': _('Unsupported tinyurl service'),
                    '2': _('Connection to tinyurl service failed'),
                    '3': _('Unable to get the original URL'),
                    }
            irc.error(messages[num])
Example #26
 def sabram(self, irc, msg, args):
     """ [<text>]
     Get @sabram to falsely attribute a quote to Cliff!
     """
     template = '<sabram> Cliff said: "%s"'
     if args:
         irc.reply(template % ' '.join(args))
         return
     url = "http://www.ivyjoy.com/quote.shtml"
     try:
         resp = web.getUrl(url, headers={'User-agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13'})
         soup = BS.BeautifulSoup(resp)
         quotation = soup.find('font').contents[0].strip()
     except:
         irc.reply(template % "Some stupid error occurred")
         return
     irc.reply(template % quotation, prefixNick=False)
Example #27
 def unicode(self, irc, msg, args, query):
   """[query] - Look up unicode character details
   """
   url = "http://unicodelookup.com/lookup?"
   url = url + urlencode({'q' : query, 'o' : 0})
   doc = web.getUrl(url, headers=HEADERS)
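   # unicodelookup.com returns positional result rows; as the reply format
   # below unpacks them, [1] is the decimal value, [2] the hex (rewritten
   # '0x' -> 'U+'), [3] the HTML entity, and [5]/[4] name/description fields.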
   try:
     json = simplejson.loads(doc)
     responses = []
     for result in json['results']:
       ucode = result[2].replace('0x','U+')
       responses.append('%s (%s): %s [HTML: %s / Decimal: %s / Hex: %s]' % (ucode, result[5], result[4], result[3], result[1], result[2]))
     response = '; '.join(responses).encode('utf8','ignore')
     irc.reply(response)
   except ValueError:
     irc.reply('No unicode characters matching /' + query + '/ found.')
Example #28
    def asteroid(self, irc, msg, args):
        """
        Fetch the next Potentially Hazardous Asteroid as
        reported by the Near Earth Object Program
        (http://www.cfa.harvard.edu/iau/lists/PHACloseApp.html)
        """
        
        # stupid astronomers and their stupid <pre> data
        # example of parsed pha
        # ('2002 AT4  ',        => object name
        #  '2511159.67',        => julian date
        #  '2163 Mar. 22.17',   => calendar date
        #  '0.05000')           => distance in AU
        pattern = re.compile('\s*([\(\)\w ]+?  )\s*([\d\.]+)\s*(\d{4} [a-z\.]+\s*[\d\.]+)\s*([\d\.]+)', re.I)

        # get now in julian
        julian_now = (time.time() / 86400.0) + 2440587

        url = 'http://www.cfa.harvard.edu/iau/lists/PHACloseApp.html'
        html = web.getUrl(url, headers=HEADERS)
        tree = fromstring(html)
        pre = tree.xpath('//pre')[0]
        lines = pre.text.split('\n')[3:]
        lines = [l for l in lines if len(l)]

        # match group tuples become our object data 
        phas = [re.match(pattern, l).groups() for l in lines]
        
        # ignore those earlier than now
        phas = [p for p in phas if float(p[1]) > julian_now]

        # sort by julian date
        phas.sort(lambda a,b: cmp(float(a[1]), float(b[1])))

        # grab the next event
        (name, jd, date, au) = phas[0]                
        date = date.replace('.', ' ')

        # the %j is just a placeholder
        date = datetime.strptime(date, "%Y %b %d %j")

        # convert AU to miles
        miles = float(au) * 92955887.6

        resp = "Object '%s' will pass within %s miles of earth on %s"
        irc.reply(resp % (name.strip(), miles, date.strftime("%A, %B %d, %Y")))
Example #29
def songlist(artist, searchstring=None):

    artist = normalize(artist)
    url = 'http://lyricsmania.com/%s_lyrics.html' % artist
    logger.info("Fetching " + url)
    html = web.getUrl(url, headers=HEADERS)
    doc = fromstring(html)
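    # Keep anchor texts whose href ends in "_lyrics_<artist>.html", optionally
    # filtered by the caller's search regex; the " lyrics" suffix is stripped
    # from each title on return.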

    titles = []
    for a in doc.xpath('//a'):
        if a.attrib.has_key('href') \
        and a.attrib['href'].endswith("_lyrics_%s.html" % artist):
            song = a.text_content()
            if searchstring:
                if not re.search(searchstring, song, re.I):
                    continue
            titles.append(song)

    return [re.sub(' lyrics$', '', x) for x in titles]
Example #30
    def gender(self, irc, msg, args, name):
        """<name>
        
        Returns gender data on name usage from Freebase:
        http://genderednames.freebaseapps.com/
        """

        url = API_URL % web.urlquote(name)
        json = web.getUrl(url, headers=HEADERS)
        response = simplejson.loads(json)

        if not response['total']:
            irc.reply("The name '%s' was not found on Freebase" % response['name'])
            return

        female_percentage = percentage(response['female'], response['total'])
        male_percentage = percentage(response['male'], response['total'])
        irc.reply("'%s': %s%% female; %s%% male" % (response['name'],
            female_percentage, male_percentage), prefixNick=True)
Example #31
 def _untiny(self, irc, url):
     data = json.loads(getUrl(self.registryValue('service') % url).decode())
     if 'org_url' in data:
         if irc:
             irc.reply(data['org_url'])
         else:
             return data['org_url']  # Used by other plugins
     elif 'error' in data:
         num, msg = data['error']
         messages = {
             '0': _('Invalid URL'),
             '1': _('Unsupported tinyurl service'),
             '2': _('Connection to tinyurl service failed'),
             '3': _('Unable to get the original URL'),
         }
         if irc:
             irc.error(messages[num])
         else:
             return url
Example #32
 def _untiny(self, irc, url):
     data = json.loads(getUrl(self.registryValue('service') % url).decode())
     if 'org_url' in data:
         if irc:
             irc.reply(data['org_url'])
         else:
             return data['org_url'] # Used by other plugins
     elif 'error' in data:
         num, msg = data['error']
         messages = {
                 '0': _('Invalid URL'),
                 '1': _('Unsupported tinyurl service'),
                 '2': _('Connection to tinyurl service failed'),
                 '3': _('Unable to get the original URL'),
                 }
         if irc:
             irc.error(messages[num])
         else:
             return url
Example #33
    def twanalyze(self, irc, msg, args, user):
        """@twanalyze user

        See the http://twanalyst.com personality test result
        for any twitter user
        """
        url = "http://twanalyst.com/%s" % quote(user)
        doc = web.getUrl(url, headers=HEADERS)
        html = lxml.html.fromstring(doc)
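        # TwAnalyst embeds its verdict in a share-on-Twitter link; pull the
        # status text out of that link's query string and regex out the three
        # fields.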
        try:
            link = html.xpath('//a[contains(@href, "twitter.com/?status")]')[0]
            href = link.attrib["href"]
            parsed = parse_qs(href)
            status = parsed["http://twitter.com/?status"][0]
            m = re.match("My Twitter personality: (.+?) My style: (.+?) ([A-Z]+)", status)
            resp = "Personality: %s, Style: %s, Ranking: %s" % m.groups()
            irc.reply(resp)
        except Exception, e:
            print >>sys.stderr, e
            irc.reply("blerg! scraping FAIL")
Example #34
 def unicode(self, irc, msg, args, query):
     """[character]
      Look up unicode character details
      """
     url = "http://unicodelookup.com/lookup?"
     url = url + urlencode({'q': query, 'o': 0})
     data = web.getUrl(url)
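      # Note: unicodedata.name() is called on the raw query, so this variant
      # assumes a single-character query; longer inputs make the name lookup
      # fail.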
     try:
         data = json.loads(data)
         responses = []
         for result in data['results']:
             ucode = result[2].replace('0x', 'U+')
             name = unicodedata.name('{0}'.format(query))
             responses.append(
                 '%s (%s): %s [HTML: %s / Decimal: %s / Hex: %s]' %
                 (ucode, name, result[4], result[3], result[1], result[2]))
         response = '; '.join(responses)
         irc.reply(response)
     except ValueError:
         irc.reply('No unicode characters matching /' + query + '/ found.')
Example #35
 def unicode(self, irc, msg, args, query):
     """[character]
     Look up unicode character details
     """
     url = "http://unicodelookup.com/lookup?"
     url = url + urlencode({"q": query, "o": 0})
     data = web.getUrl(url)
     try:
         data = json.loads(data)
         responses = []
         for result in data["results"]:
             ucode = result[2].replace("0x", "U+")
             name = unicodedata.name("{0}".format(query))
             responses.append(
                 "%s (%s): %s [HTML: %s / Decimal: %s / Hex: %s]" %
                 (ucode, name, result[4], result[3], result[1], result[2]))
         response = "; ".join(responses)
         irc.reply(response)
     except ValueError:
         irc.reply("No unicode characters matching /" + query + "/ found.")
Example #36
    def gender(self, irc, msg, args, name):
        """<name>
        
        Returns gender data on name usage from Freebase:
        http://genderednames.freebaseapps.com/
        """

        url = API_URL % web.urlquote(name)
        json = web.getUrl(url, headers=HEADERS)
        response = simplejson.loads(json)

        if not response["total"]:
            irc.reply("The name '%s' was not found on Freebase" % response["name"])
            return

        female_percentage = percentage(response["female"], response["total"])
        male_percentage = percentage(response["male"], response["total"])
        irc.reply(
            "'%s': %s%% female; %s%% male" % (response["name"], female_percentage, male_percentage), prefixNick=True
        )
Example #37
    def mortgage(self, irc, msg, args):
        """
        Returns latest mortgage rates summary from Zillow --
        http://www.zillow.com/howto/api/APIOverview.htm
        """
        url = API_URL % ('GetRateSummary', ZWSID)
        json = web.getUrl(url, headers=HEADERS)
        response = simplejson.loads(json)
        rates = response['response']
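        # GetRateSummary nests today's and last week's rates under
        # response['response'], keyed by loan product.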

        o = "The average rate on a 30 year mortgage is %s. Last week it was %s. " + \
        "If you want a 15 year mortgage the average rate is %s. Last week it was %s. " + \
        "If you're crazy enough to want a 5-1 ARM the average rate is %s. Last week it was %s. " + \
        "This is according to the Zillow Mortgage Market."

        resp = o % (
            rates['today']['thirtyYearFixed'], rates['lastWeek']['thirtyYearFixed'],
            rates['today']['fifteenYearFixed'], rates['lastWeek']['fifteenYearFixed'],
            rates['today']['fiveOneARM'], rates['lastWeek']['fiveOneARM'])
        
        irc.reply(resp)
Example #38
    def twanalyze(self, irc, msg, args, user):
        """@twanalyze user

        See the http://twanalyst.com personality test result
        for any twitter user
        """
        url = 'http://twanalyst.com/%s' % quote(user)
        doc = web.getUrl(url, headers=HEADERS)
        html = lxml.html.fromstring(doc)
        try:
            link = html.xpath('//a[contains(@href, "twitter.com/?status")]')[0]
            href = link.attrib['href']
            parsed = parse_qs(href)
            status = parsed['http://twitter.com/?status'][0]
            m = re.match(
                'My Twitter personality: (.+?) My style: (.+?) ([A-Z]+)',
                status)
            resp = "Personality: %s, Style: %s, Ranking: %s" % m.groups()
            irc.reply(resp)
        except Exception, e:
            print >> sys.stderr, e
            irc.reply('blerg! scraping FAIL')
Example #39
    def mortgage(self, irc, msg, args):
        """
        Returns latest mortgage rates summary from Zillow --
        http://www.zillow.com/howto/api/APIOverview.htm
        """
        url = API_URL % ('GetRateSummary', ZWSID)
        json = web.getUrl(url, headers=HEADERS)
        response = simplejson.loads(json)
        rates = response['response']

        o = "The average rate on a 30 year mortgage is %s. Last week it was %s. " + \
        "If you want a 15 year mortgage the average rate is %s. Last week it was %s. " + \
        "If you're crazy enough to want a 5-1 ARM the average rate is %s. Last week it was %s. " + \
        "This is according to the Zillow Mortgage Market."

        resp = o % (rates['today']['thirtyYearFixed'],
                    rates['lastWeek']['thirtyYearFixed'],
                    rates['today']['fifteenYearFixed'],
                    rates['lastWeek']['fifteenYearFixed'],
                    rates['today']['fiveOneARM'],
                    rates['lastWeek']['fiveOneARM'])

        irc.reply(resp)
Example #40
 def _chefjivevalleypig(self, irc, type, s):
     params = urlencode(dict(input=s,type=type))
     url = 'http://www.cs.utexas.edu/users/jbc/bork/bork.cgi?' + params
     resp = web.getUrl(url, headers=HEADERS)
     resp = re.sub('\n', ' ', resp)
     irc.reply(resp.encode('utf-8', 'ignore').strip())
Example #41
 def _chefjivevalleypig(self, irc, type, s):
     params = urlencode(dict(input=s, type=type))
     url = "http://www.cs.utexas.edu/users/jbc/bork/bork.cgi?" + params
     resp = web.getUrl(url, headers=HEADERS)
     resp = re.sub("\n", " ", resp)
     irc.reply(resp.encode("utf-8", "ignore").strip())
Example #42
 def _openUrl(self, url, postdata={}):
     data = urllib.urlencode(postdata)
     request = urllib2.Request(url, data=data)
     return web.getUrl(request)
Example #43
 def _fetch_xml(self, function, query):
   url = "http://api.wunderground.com/auto/wui/geo/%sXML/index.xml?%s" % (function, urlencode({'query' : query }))
   print url
   doc = web.getUrl(url, headers=HEADERS)
   # Wunderground double-encodes some of its entities, so we'll double-decode.
   return BSS(doc, convertEntities=BSS.HTML_ENTITIES)