Beispiel #1
0
 def rate_rt_audience(self, name):
     """Audience Rating from rotten tomatoes.

     Searches rottentomatoes for *name*; returns a (title, rating) tuple
     where rating is a percentage string (e.g. "85%"), or None when no
     search result matches the normalized name.
     """
     soup = getsoup(self.rt_search, {'search': name}, referer=self.rt_url)
     ourname = self.normalize(name)
     results = soup.find('ul', id='movie_results_ul')
     if results is None:
         # No result list: the search redirected straight to a movie page,
         # so scrape rating and title from it directly.
         # NOTE(review): trailing space in the class name matches the site's
         # markup at the time of writing -- confirm it still does.
         rating = soup.find(name="span",
                            attrs={
                                "class": "meter popcorn numeric "
                            }).renderContents() + "%"
         title = strip_html(
             encode(
                 soup.find('h1', 'movie_title').renderContents(),
                 'utf-8')).strip()
         return title, rating
     else:
         # Scan the result list for an exact (normalized) title match.
         for result in results('li'):
             try:
                 title = strip_html(
                     result.find('div',
                                 'media_block_content').h3.a.renderContents(
                                 )).strip()
                 if ourname == self.normalize(title):
                     url = result.h3.a['href']
                     # Fetch the movie page itself for the audience meter.
                     innerSoup = getsoup(self.rt_url + url, {},
                                         self.rt_search, {'search': name})
                     rating = innerSoup.find(name="span",
                                             attrs={
                                                 "class":
                                                 "meter popcorn numeric "
                                             }).renderContents() + "%"
                     return title, rating
             except AttributeError:
                 # Result item missing expected markup -- skip it.
                 pass
         return
Beispiel #2
0
    def response(self, nick, args, kwargs):
        """Look up song lyrics on songmeanings.com via a Google site search.

        args[0] is the query; returns the lyrics prefixed with "[title]"
        (one line per lyric line), an error message formatted with kwargs
        on failure, or None when no suitable result page is found.
        """
        # Google search restricted to songmeanings.com.
        url = urlunparse(('https', 'www.google.com', 'search', '',
                          urlencode({
                              'num': '100',
                              'safe': 'off',
                              'hl': 'en',
                              'q': 'site:songmeanings.com ' + args[0]
                          }), ''))
        soup = getsoup(url)
        new = None
        for h3 in soup.findAll('h3', attrs={'class': 'r'}):
            uri = urlparse(h3.a['href'])
            if uri.path == '/url':
                # Google wraps results as /url?q=<real url>; unwrap it.
                url = dict(parse_qsl(uri.query))['q']
                uri = urlparse(url)
                # Raw strings: '\d'/'\s' are not valid string escapes and the
                # old plain literals relied on python passing them through.
                if re.search(r'/songs/view/\d+', uri.path) is not None:
                    # Direct lyrics page.
                    new = urlunparse(uri._replace(query='', fragment=''))
                    break
                elif re.search(
                        r'/profiles/(submissions|interaction)/\d+/comments',
                        uri.path) is not None:
                    # Comment page: follow the direct link back to the song.
                    soup = getsoup(url)
                    for a in soup.find('a', title='Direct link to comment'):
                        new = urlunparse(
                            urlparse(a.parent['href'])._replace(fragment='',
                                                                query=''))
                        break
                if new:
                    break
        if new:
            url = new
            try:
                soup = getsoup(url)
                try:
                    title = re.sub(r'\s+Lyrics\s+\|\s+SongMeanings.*$', '',
                                   soup.title.renderContents())
                except StandardError:
                    title = 'Unknown artist/song, check parsing code!'
                text = soup.find('div',
                                 attrs={'class': re.compile(r'.*lyric-box.*')})
                # Strip links (ads/annotations) out of the lyric box.
                for a in text('a'):
                    a.extract()
            except StandardError:
                self.log.warn(
                    'unable to find textblock from url {0!r} (query: {1!r})'.
                    format(url, args[0]))
                return u'{nick}: {error}'.format(error=self.error, **kwargs)

            try:
                lyrics = decode(text.renderContents(), 'utf-8')
                # Title first, then non-blank lyric lines.
                return u'\n'.join(['[{}]'.format(title)] + filter(
                    None,
                    [line.strip()
                     for line in strip_html(lyrics).splitlines()]))
            except StandardError:
                self.log.exception(
                    'error parsing lyrics for query: {0!r}'.format(args[0]))
                return u'{nick}: {error}'.format(error=self.error, **kwargs)
Beispiel #3
0
 def response(self, nick, args, kwargs):
     """Return the description of the first beer matching args[0]."""
     search_opts = {'q': args[0], 'qt': 'beer', 'ls': 'Y', 'retired': 'N'}
     results = getsoup('http://beeradvocate.com/search', search_opts)
     content = results.find('div', id='baContent')
     # Follow the first search hit through to the beer's own page.
     first_hit = content.ul.findAll('li')[0].a['href']
     beer_page = getsoup(urljoin('http://beeradvocate.com/', first_hit))
     return beer_page.find('meta', property='og:description')['content']
Beispiel #4
0
 def response(self, nick, args, kwargs):
     """Fetch an FML entry (specific id in args[0], otherwise random)."""
     if args[0]:
         target = self.spec_url % int(args[0])
     else:
         target = self.rand_url
     soup = getsoup(target)
     # The submit form would otherwise pollute the scraped text.
     soup.find('div', id='submit').extract()
     post = soup.body.find('div', 'post')
     # Entry id is the last path segment of the first fmllink href.
     entry_id = int(post.find('a', 'fmllink')['href'].split('/')[-1])
     raw = ' '.join(link.renderContents() for link in post('a', 'fmllink'))
     text = strip_html(decode(raw, 'utf-8'))
     return u'%s: (%d) %s' % (nick, entry_id, text)
Beispiel #5
0
 def rate_rt(self, name):
     """Rating from rotten tomatoes.

     Returns a (title, rating) tuple where rating is a percentage string
     (e.g. "85%"); falls through (returning None) when no search result
     matches the normalized name.
     """
     soup = getsoup(self.rt_search, {'search': name}, referer=self.rt_url)
     ourname = self.normalize(name)
     results = soup.find('ul', id='movie_results_ul')
     if results is None:
         # Search redirected straight to a movie page; scrape it directly.
         rating = soup.find('span',
                            id='all-critics-meter').renderContents() + '%'
         title = strip_html(
             encode(
                 soup.find('h1', 'movie_title').renderContents(),
                 'utf-8')).strip()
         return title, rating
     else:
         # Scan the result list for an exact (normalized) title match.
         for result in results('li'):
             try:
                 rating = strip_html(
                     result.find('span',
                                 'tMeterScore').renderContents()).strip()
                 title = strip_html(
                     result.find('div',
                                 'media_block_content').h3.a.renderContents(
                                 )).strip()
                 if ourname == self.normalize(title):
                     return title, rating
             except AttributeError:
                 # Result item missing expected markup -- skip it.
                 pass
 def response(self, nick, args, kwargs):
     """Fetch an FML entry (specific id in args[0], otherwise random)."""
     soup = getsoup(self.spec_url % int(args[0]) if args[0] else self.rand_url)
     # Remove the submit form so it doesn't pollute the scraped text.
     soup.find('div', id='submit').extract()
     post = soup.body.find('div', 'post')
     # Entry id comes from the last path segment of the first fmllink href;
     # body text is all fmllink contents joined and stripped of html.
     return u'%s: (%d) %s' % (nick, int(post.find('a', 'fmllink')['href'].split('/')[-1]),
                              strip_html(' '.join(link.renderContents()
                                                 for link in post('a', 'fmllink')).decode('utf-8', 'ignore')))
Beispiel #7
0
    def getrandom(self, times=3):
        """".how (times) - Gives you random instructions from wikiHow, by default 3 steps"""
        steps = []
        # One random wikiHow page per requested step.
        for i in xrange(times):
            page = getsoup("http://www.wikihow.com/Special:Randomizer")
            section = page.find("div", {"id": "steps"})
            if section:  # Only one 'method'
                allsteps = section.find("ol").findChildren("li",
                                                           recursive=False)
            else:  # Multiple 'methods', each with their own list of steps
                # Probe steps_1 .. steps_4 until one yields a usable list.
                for x in xrange(1, 5):
                    try:
                        section = page.find("div",
                                            {"id": "steps_{}".format(x)})
                        try:
                            # Possible for a Method to have no actual steps, just a paragraph, so check for the list
                            allsteps = section.find("ol").findChildren(
                                "li", recursive=False)
                            break
                        except _notrap:
                            raise
                        except:
                            continue
                    except _notrap:
                        raise
                    except:
                        break

            # NOTE(review): if every numbered section fails, 'allsteps' keeps
            # its value from a previous iteration (or is unbound on the first
            # pass) -- confirm intended.
            steps.append(random.choice(allsteps))

        results = []
        for i, step in enumerate(steps):
            # The bolded "whb" tag holds the step's summary sentence.
            tag = step.find("b", {"class": "whb"})
            results.append(u'Step #{}: {}'.format(i + 1, decode(tag.text)))
        return results
Beispiel #8
0
    def getweather(self, location):
        """Look up NOAA weather.

        Returns a comma-joined summary string (temps, condition, extra
        rows) for *location*, or an error message when the page can't be
        scraped.
        """
        soup = getsoup(self.noaa_search, {'inputstring': location},
                       referer=self.noaa_url)

        # jesus f*****g christ, their html is bad.. looks like 1987
        # nested tables, font tags, and not a single class or id.. good game
        current = soup.find('img', alt='Current Local Weather')
        if not current:
            return u'NOAA website is having issues'
        current = current.findNext('table').table.table
        # The cell packs condition/temps separated by <br>; turn the breaks
        # into '|' so they survive strip_html and can be split apart.
        temp = current.td.font.renderContents().replace('<br />', '|')
        temp = strip_html(temp.decode('utf-8')).replace('\n', '').strip()
        cond, _, tempf, tempc = temp.split('|')
        tempc = tempc.replace('(', '').replace(')', '')
        tempf, tempc = self.docolor(tempf, tempc)
        other = current.table
        items = [u'%s (%s) - %s' % (tempf, tempc, cond)]
        for row in other('tr'):
            # Rows containing links are navigation, not data.
            if row.a:
                continue
            cells = row('td')
            key = self.render(cells[0])
            val = self.render(cells[1])
            items.append(u'%s %s' % (key, val))
        return u', '.join(items)
Beispiel #9
0
 def response(self, nick, args, kwargs):
     """List steam group members who are currently online/in-game."""
     page = 1
     players = []
     # Follow the 'next page' links until there are no more.
     while page:
         url = self.group_url + '?p=%d' % page
         soup = getsoup(url)
         next = soup.body.find('div', 'pageLinks').find(text=self.next_re)
         if next is None:
             page = None
         else:
             page = int(next.parent['href'].split('=', 1)[-1])
         for player in soup.body('div', attrs={'class': self.status_re}):
             name = strip_html(player.p.a.renderContents())
             game = player.find('span', 'linkFriend_in-game')
             if game is None:
                 # Online but not in a game.
                 if settings.STEAM_SHOW_ONLINE:
                     status = 'Online'
                 else:
                     status = None
             else:
                 # In-game status text; drop the trailing ' - Join' link text.
                 status = strip_html(
                     game.renderContents()).split('\n')[-1].replace(
                         ' - Join', '')
             if status:
                 players.append('%s: %s' % (name, status))
     if players:
         return u'\n'.join(players)
     return u'No one online.'
Beispiel #10
0
    def getweather(self, location):
        """Look up NOAA weather.

        Returns a comma-joined summary string (temps, condition, extra
        rows) for *location*, or an error message when the page can't be
        scraped.
        """
        soup = getsoup(self.noaa_search, {'inputstring': location},
                       referer=self.noaa_url)

        # jesus f*****g christ, their html is bad.. looks like 1987
        # nested tables, font tags, and not a single class or id.. good game
        current = soup.find('img', alt='Current Local Weather')
        if not current:
            return u'NOAA website is having issues'
        current = current.findNext('table').table.table
        # The cell packs condition/temps separated by <br>; turn the breaks
        # into '|' so they survive strip_html and can be split apart.
        temp = current.td.font.renderContents().replace('<br />', '|')
        temp = strip_html(decode(temp, 'utf-8')).replace('\n', '').strip()
        cond, _, tempf, tempc = temp.split('|')
        tempc = tempc.replace('(', '').replace(')', '')
        tempf, tempc = self.docolor(tempf, tempc)
        other = current.table
        items = [u'%s (%s) - %s' % (tempf, tempc, cond)]
        for row in other('tr'):
            # Rows containing links are navigation, not data.
            if row.a:
                continue
            cells = row('td')
            key = self.render(cells[0])
            val = self.render(cells[1])
            items.append(u'%s %s' % (key, val))
        return u', '.join(items)
Beispiel #11
0
    def lookup_verse(self, query, book=None):
        """Lookup specified verse.

        Returns the verse text for *query* from *book* (defaults to
        self.DEFAULT_BIBLE) with superscript verse markers converted to
        unicode and whitespace collapsed, or an error string for an
        unknown bible.
        """
        if book is None:
            book = self.DEFAULT_BIBLE
        elif book not in self.bibles:
            return u'Unknown bible.. why do you hate god so much?'
        opts = {'search': query, 'version': book}
        soup = getsoup(self.bg_search, opts, referer=self.bg_search)
        passage = soup.find('div', 'passage-wrap')
        # Drop the heading and publisher boilerplate around the passage.
        for name in 'heading passage-class-0', 'publisher-info-bottom':
            passage.find('div', name).extract()
        response = []
        for para in passage('p'):
            response.append(para.renderContents())
        res = decode(' '.join(response), 'utf-8')

        # convert superscript verse markers to unicode
        while True:
            match = self.sup_re.search(res)
            if not match:
                break
            res = res.replace(match.group(0), superscript(match.group(1)))

        # XXX this is like this for a reason
        res = strip_html(res).replace(u'\xa0', u' ')
        while u'  ' in res:
            res = res.replace(u'  ', u' ')
        res = res.strip()
        return res
Beispiel #12
0
    def getrandom(self, times=3):
        """".how (times) - Gives you random instructions from wikiHow, by default 3 steps"""
        steps = []
        # One random wikiHow page per requested step.
        for i in xrange(times):
            page = getsoup("http://www.wikihow.com/Special:Randomizer")
            section = page.find("div", {"id": "steps"})
            if section:  # Only one 'method'
                allsteps = section.find("ol").findChildren("li", recursive=False)
            else:  # Multiple 'methods', each with their own list of steps
                # Probe steps_1 .. steps_4 until one yields a usable list.
                for x in xrange(1, 5):
                    try:
                        section = page.find("div", {"id": "steps_{}".format(x)})
                        try:
                            # Possible for a Method to have no actual steps, just a paragraph, so check for the list
                            allsteps = section.find("ol").findChildren("li", recursive=False)
                            break
                        except _notrap:
                            raise
                        except:
                            continue
                    except _notrap:
                        raise
                    except:
                        break

            # NOTE(review): if every numbered section fails, 'allsteps' keeps
            # its value from a previous iteration (or is unbound on the first
            # pass) -- confirm intended.
            steps.append(random.choice(allsteps))

        results = []
        for i, step in enumerate(steps):
            # The bolded "whb" tag holds the step's summary sentence.
            tag = step.find("b", {"class": "whb"})
            results.append(u"Step #{}: {}".format(i + 1, decode(tag.text)))
        return results
Beispiel #13
0
    def response(self, nick, args, kwargs):
        """Fetch song lyrics for args[0] from songmeanings.net.

        Uses google "lucky" to find the page; returns the lyrics prefixed
        with "[title]", or an error message formatted with kwargs.
        """
        try:
            url = self.google.lucky(u'site:songmeanings.net ' + args[0])
        except NonRedirectResponse:
            self.log.warn(
                'no url for query {0!r} found from google lucky'.format(
                    args[0]))
            return u'{nick}: {error}'.format(error=self.error, **kwargs)

        try:
            soup = getsoup(url)
            try:
                title = strip_html(
                    soup.find('a', 'pw_title').renderContents()).strip()
            except StandardError:
                title = 'Unknown artist/song, check parsing code!'
            text = soup.find('div', id='textblock')
        except StandardError:
            self.log.warn(
                'unable to find textblock from url {0!r} (query: {1!r})'.
                format(url, args[0]))
            return u'{nick}: {error}'.format(error=self.error, **kwargs)

        try:
            lyrics = decode(text.renderContents(), 'utf-8')
            # Title first, then non-blank lyric lines.
            return u'\n'.join(['[{}]'.format(title)] + filter(
                None,
                [line.strip() for line in strip_html(lyrics).splitlines()]))
        except StandardError:
            self.log.exception('error parsing lyrics for query: {0!r}'.format(
                args[0]))
            return u'{nick}: {error}'.format(error=self.error, **kwargs)
Beispiel #14
0
 def response(self, nick, args, kwargs):
     """List steam group members who are currently online/in-game."""
     page = 1
     players = []
     # Follow the 'next page' links until there are no more.
     while page:
         url = self.group_url + "?p=%d" % page
         soup = getsoup(url)
         next = soup.body.find("div", "pageLinks").find(text=self.next_re)
         if next is None:
             page = None
         else:
             page = int(next.parent["href"].split("=", 1)[-1])
         for player in soup.body("div", attrs={"class": self.status_re}):
             name = strip_html(player.p.a.renderContents())
             game = player.find("span", "linkFriend_in-game")
             if game is None:
                 # Online but not in a game.
                 if settings.STEAM_SHOW_ONLINE:
                     status = "Online"
                 else:
                     status = None
             else:
                 # In-game status text; drop the trailing ' - Join' link text.
                 status = strip_html(game.renderContents()).split("\n")[-1].replace(" - Join", "")
             if status:
                 players.append("%s: %s" % (name, status))
     if players:
         return u"\n".join(players)
     return u"No one online."
Beispiel #15
0
 def response(self, nick, args, kwargs):
     """Look up the city/state for the zip code given in args[0]."""
     soup = getsoup(self.url, {"InData": args[0]})
     # The result table is identified only by its background color.
     city_link = soup.body.find("table", bgcolor="#ffffcc").a
     city_name = proper(render(city_link).capitalize())
     state_name = proper(render(city_link.parent.findNext("td")))
     return u"%s: %s: %s, %s" % (nick, args[0], city_name, state_name)
Beispiel #16
0
 def response(self, nick, args, kwargs):
     """Spell-check args[0] using Google's "did you mean" suggestion."""
     query_opts = {'hl': 'en', 'safe': 'off', 'q': args[0]}
     soup = getsoup(self.google_search, query_opts, referer=self.google_url)
     # Suggestion links point back at /search with spell=1 in the query.
     suggestion = soup.body.find('a', href=re.compile(r'^/search.*spell=1'))
     if suggestion:
         result = strip_html(decode(suggestion.renderContents(), 'utf-8'))
     else:
         result = u'spelled correctly. probably.'
     return u'%s: %s' % (nick, result)
 def lookup(self, term, idx=1):
     """Look up *term* in the dictionary and return the cleaned definition."""
     target = urljoin(self.define_url, quote(term.lower()))
     soup = getsoup(target, referer=self.base_url)
     # Drop <br> tags so the rendered text doesn't pick up line breaks.
     for linebreak in soup('br'):
         linebreak.extract()
     text = strip_html(soup.renderContents().decode('utf-8'))
     text = text.replace(u'\xa0', ' ').replace('\n', ' ')
     # Collapse runs of whitespace to single spaces.
     return self.whitespace_re.sub(' ', text).strip()
 def response(self, nick, args, kwargs):
     """Spell-check args[0] via Google's suggestion link."""
     search_opts = {'hl': 'en', 'aq': 'f', 'safe': 'off', 'q': args[0]}
     soup = getsoup(self.google_search, search_opts, referer=self.google_url)
     suggestion = soup.body.find('a', 'spell')
     if suggestion:
         result = strip_html(suggestion.renderContents().decode('utf-8', 'ignore'))
     else:
         result = u'spelled correctly'
     return u'%s: %s' % (nick, result)
Beispiel #19
0
 def lookup(self, term, idx=1):
     """Lookup term in dictionary.

     Returns the definition page rendered to a single whitespace-collapsed
     line. *idx* is accepted for interface compatibility but unused here.
     """
     url = urljoin(self.define_url, quote(term.lower()))
     soup = getsoup(url, referer=self.base_url)
     # Drop <br> tags so renderContents doesn't pick up line breaks.
     for br in soup('br'):
         br.extract()
     val = strip_html(decode(soup.renderContents(), 'utf-8'))
     val = val.replace(u'\xa0', ' ').replace('\n', ' ')
     return self.whitespace_re.sub(' ', val).strip()
Beispiel #20
0
 def response(self, nick, args, kwargs):
     """Fetch lyrics for args[0] from LyricWiki (google lucky first)."""
     try:
         url = self.google.lucky(u'site:lyrics.wikia.com ' + args[0])
     except NonRedirectResponse:
         # Fall back to the wiki's own search when lucky doesn't redirect.
         opts = {'search': args[0], 'ns0': 1}
         soup = getsoup(self.searchurl, referer=self.baseurl, opts=opts)
         url = urljoin(self.baseurl, soup.li.a['href'])
     soup = getsoup(url, referer=self.baseurl)
     title = self.render(soup.title).split(' - LyricWiki')[0]
     title = title.replace(':', ' - ').replace('_', ' ')
     lyrics = soup.find('div', 'lyricbox')
     # Remove ringtone-spam blocks before rendering.
     for spam in lyrics('div', 'rtMatcher'):
         spam.extract()
     lyrics = self.normalize(self.render(lyrics))
     if not lyrics or lyrics == 'None':
         raise ValueError('no results')
     return u'%s:\n%s' % (title, lyrics)
Beispiel #21
0
 def response(self, nick, args, kwargs):
     """Fetch lyrics for args[0] from LyricWiki (google lucky first)."""
     try:
         url = self.google.lucky(u"site:lyrics.wikia.com " + args[0])
     except NonRedirectResponse:
         # Fall back to the wiki's own search when lucky doesn't redirect.
         opts = {"search": args[0], "ns0": 1}
         soup = getsoup(self.searchurl, referer=self.baseurl, opts=opts)
         url = urljoin(self.baseurl, soup.li.a["href"])
     soup = getsoup(url, referer=self.baseurl)
     title = self.render(soup.title).split(" - LyricWiki")[0]
     title = title.replace(":", " - ")
     title = title.replace("_", " ")
     lyrics = soup.find("div", "lyricbox")
     # Remove ringtone-spam blocks before rendering.
     for spam in lyrics("div", "rtMatcher"):
         spam.extract()
     lyrics = self.render(lyrics)
     lyrics = self.normalize(lyrics)
     if not lyrics or lyrics == "None":
         raise ValueError("no results")
     return u"%s:\n%s" % (title, lyrics)
Beispiel #22
0
 def response(self, nick, args, kwargs):
     """Fetch lyrics for args[0] from LyricWiki (google lucky first)."""
     try:
         url = self.google.lucky(u'site:lyrics.wikia.com ' + args[0])
     except NonRedirectResponse:
         # Fall back to the wiki's own search when lucky doesn't redirect.
         opts = {'search': args[0], 'ns0': 1}
         soup = getsoup(self.searchurl, referer=self.baseurl, opts=opts)
         url = urljoin(self.baseurl, soup.li.a['href'])
     soup = getsoup(url, referer=self.baseurl)
     title = self.render(soup.title).split(' - LyricWiki')[0]
     title = title.replace(':', ' - ')
     title = title.replace('_', ' ')
     lyrics = soup.find('div', 'lyricbox')
     # Remove ringtone-spam blocks before rendering.
     for spam in lyrics('div', 'rtMatcher'):
         spam.extract()
     lyrics = self.render(lyrics)
     lyrics = self.normalize(lyrics)
     if not lyrics or lyrics == 'None':
         raise ValueError('no results')
     return u'%s:\n%s' % (title, lyrics)
Beispiel #23
0
    def response(self, nick, args, kwargs):
        """Look up song lyrics on songmeanings.com via a Google site search.

        args[0] is the query; returns the lyrics prefixed with "[title]"
        (one line per lyric line), an error message formatted with kwargs
        on failure, or None when no suitable result page is found.
        """
        # Google search restricted to songmeanings.com.
        url = urlunparse(('https', 'www.google.com', 'search', '',
            urlencode({'num': '100', 'safe': 'off', 'hl': 'en', 'q': 'site:songmeanings.com ' + args[0]}), ''))
        soup = getsoup(url)
        new = None
        for h3 in soup.findAll('h3', attrs={'class': 'r'}):
            uri = urlparse(h3.a['href'])
            if uri.path == '/url':
                # Google wraps results as /url?q=<real url>; unwrap it.
                url = dict(parse_qsl(uri.query))['q']
                uri = urlparse(url)
                # Raw strings: '\d'/'\s' are not valid string escapes and the
                # old plain literals relied on python passing them through.
                if re.search(r'/songs/view/\d+', uri.path) is not None:
                    # Direct lyrics page.
                    new = urlunparse(uri._replace(query='', fragment=''))
                    break
                elif re.search(r'/profiles/(submissions|interaction)/\d+/comments', uri.path) is not None:
                    # Comment page: follow the direct link back to the song.
                    soup = getsoup(url)
                    for a in soup.find('a', title='Direct link to comment'):
                        new = urlunparse(urlparse(a.parent['href'])._replace(fragment='', query=''))
                        break
                if new:
                    break
        if new:
            url = new
            try:
                soup = getsoup(url)
                try:
                    title = re.sub(r'\s+Lyrics\s+\|\s+SongMeanings.*$', '', soup.title.renderContents())
                except StandardError:
                    title = 'Unknown artist/song, check parsing code!'
                text = soup.find('div', attrs={'class': re.compile(r'.*lyric-box.*')})
                # Strip links (ads/annotations) out of the lyric box.
                for a in text('a'):
                    a.extract()
            except StandardError:
                self.log.warn('unable to find textblock from url {0!r} (query: {1!r})'.format(url, args[0]))
                return u'{nick}: {error}'.format(error=self.error, **kwargs)

            try:
                lyrics = decode(text.renderContents(), 'utf-8')
                # Title first, then non-blank lyric lines.
                return u'\n'.join(['[{}]'.format(title)] + filter(None,
                    [line.strip() for line in strip_html(lyrics).splitlines()]))
            except StandardError:
                self.log.exception('error parsing lyrics for query: {0!r}'.format(args[0]))
                return u'{nick}: {error}'.format(error=self.error, **kwargs)
Beispiel #24
0
 def lookup(self, query, idx=None):
     """Look up *query* on urban dictionary; idx is the 1-based result number."""
     if idx is None:
         idx = 1
     requested = idx
     # Map the 1-based result number onto a page and a 0-based slot.
     page = int(idx / self.RESULTS_PER_PAGE)
     slot = (idx % self.RESULTS_PER_PAGE) - 1
     # A multiple of RESULTS_PER_PAGE lands on the last slot of its page.
     if slot == -1:
         slot = 6
     opts = {'term': query, 'page': page}
     soup = getsoup(self.urban_search, opts, referer=self.urban_url)
     return self.parse(soup, slot, page, requested)
Beispiel #25
0
 def rate_rt_audience(self, name):
     """Audience Rating from rotten tomatoes.

     Returns a (title, rating) tuple where rating is a percentage string,
     or None when no search result matches the normalized name.
     """
     soup = getsoup(self.rt_search, {'search': name}, referer=self.rt_url)
     ourname = self.normalize(name)
     results = soup.find('ul', id='movie_results_ul')
     if results is None:
         # Search redirected straight to a movie page; scrape it directly.
         # NOTE(review): trailing space in the class name matches the site's
         # markup at the time of writing -- confirm it still does.
         rating = soup.find(name="span", attrs={ "class" : "meter popcorn numeric " }).renderContents() + "%"
         title = strip_html(encode(soup.find('h1', 'movie_title').renderContents(), 'utf-8')).strip()
         return title, rating
     else:
         # Scan the result list for an exact (normalized) title match.
         for result in results('li'):
             try:
                 title = strip_html(result.find('div', 'media_block_content').h3.a.renderContents()).strip()
                 if ourname == self.normalize(title):
                     url = result.h3.a['href']
                     # Fetch the movie page itself for the audience meter.
                     innerSoup = getsoup(self.rt_url+url, { }, self.rt_search, {'search': name})
                     rating = innerSoup.find(name="span", attrs= { "class" : "meter popcorn numeric " }).renderContents() + "%"
                     return title, rating
             except AttributeError:
                 # Result item missing expected markup -- skip it.
                 pass
         return
Beispiel #26
0
 def lookup(self, query, idx=None):
     """Look up term on urban dictionary.

     *idx* is the 1-based result number (defaults to 1); it is mapped to a
     page number and a 0-based slot on that page before parsing.
     """
     if idx is None:
         idx = 1
     orig_idx = idx
     page = int(idx / RESULTS_PER_PAGE)
     idx = (idx % RESULTS_PER_PAGE) - 1
     # A multiple of RESULTS_PER_PAGE lands on the last slot of its page.
     if idx == -1:
         idx = 6
     return self.parse(getsoup(_urban_search,
                               {'term': query, 'page': page},
                               referer=_urban_url,),
                       idx, page, orig_idx)
Beispiel #27
0
 def response(self, nick, args, kwargs):
     """Look up args = (query, idx) on urban dictionary (random if no query)."""
     query, idx = args
     try:
         if not query:
             result = self.parse(getsoup(_urban_random, referer=_urban_url))
         else:
             result = self.lookup(query, int(idx) if idx else idx)
     except (SystemExit, KeyboardInterrupt):
         raise
     except:
         # Any scrape/parse failure gets the canned snarky reply.
         result = u"That doesn't even exist in urban dictionary, stop making stuff up."
     return u'{}: {}'.format(nick, result)
Beispiel #28
0
 def response(self, nick, args, kwargs):
     """Announce the title of a youtube video linked in args[0] (best effort)."""
     try:
         url = args[0]
         uri = urlparse(url)
         # Only act on real youtube watch URLs: scheme and registered
         # domain whitelists, a /watch path, and a v= query parameter.
         if (uri.scheme.lower() in SCHEMES and
                 '.'.join(uri.netloc.lower().split('.')[-2:]) in DOMAINS and
                 os.path.split(os.path.normpath(uri.path))[-1] == 'watch' and
                 'v' in cgi.parse_qs(uri.query)):
             soup = getsoup(url)
             title = strip_html(decode(soup.title.renderContents())).replace(u' - YouTube', u'').strip()
             if title:
                 self.bot.output(title, kwargs['req'])
     except (KeyboardInterrupt, SystemExit):
         raise
     except:
         # Deliberate best-effort: never let a scrape failure surface.
         pass
Beispiel #29
0
    def response(self, nick, args, kwargs):
        """Return the top three Google results for args[0] as "name: url" lines."""
        soup = getsoup(self.google.find(args[0]))
        lines = []
        results = soup.body("div", {"id": "ires"})[0].ol("li", {"class": "g"})
        for item in results:
            if len(lines) >= 3:
                break
            name = strip_html(decode(item.b.renderContents()))
            # Google wraps hrefs as /url?q=<target>&sa=...; unwrap them.
            href = item.a["href"].replace("/url?q=", "")
            target = href.split("&sa")[0]
            lines.append(u"{}: {} \n".format(name, target))
        return u"".join(lines)
Beispiel #30
0
    def response(self, nick, args, kwargs):
        """Find a youtube video matching args[0] via google lucky search."""
        # NOTE(review): '\:' is not a string escape, so the query literally
        # contains 'site\:youtube.com' (backslash included) -- confirm that
        # is the intended google syntax.
        query = 'site\:youtube.com/watch '+args[0]
        #return u'{}: {}: {}'.format(nick, YOUTUBE, self.google.lucky(query))

        url = self.google.lucky(query)
        uri = urlparse(url)
        # Only treat the hit as a video if it is a real youtube watch URL:
        # whitelisted scheme/domain, a /watch path, and a v= parameter.
        if (uri.scheme.lower() in SCHEMES and
                '.'.join(uri.netloc.lower().split('.')[-2:]) in DOMAINS and
                os.path.split(os.path.normpath(uri.path))[-1] == 'watch' and
                'v' in cgi.parse_qs(uri.query)):
            soup = getsoup(url)
            title = strip_html(decode(soup.title.renderContents())).replace(u' - YouTube', u'').strip()
            if title:
                response = u'{} - {}'.format(title, url)
                self.bot.output(response, kwargs['req'])
            else:
                return u'{} - {}'.format('Cant find youtube link, here is a google lucky search', url)
Beispiel #31
0
 def rate_rt(self, name):
     """Rating from rotten tomatoes.

     Returns a (title, rating) tuple where rating is a percentage string,
     or None when no search result matches the normalized name.
     """
     soup = getsoup(self.rt_search, {'search': name}, referer=self.rt_url)
     ourname = self.normalize(name)
     results = soup.find('ul', id='movie_results_ul')
     if results is None:
         # Search redirected straight to a movie page; scrape it directly.
         rating = soup.find('span', id='all-critics-meter').renderContents() + '%'
         # NOTE(review): calling .encode() on a byte string triggers an
         # implicit ascii decode under python 2 -- may fail on non-ascii
         # titles; confirm against the encode() helper used elsewhere.
         title = strip_html(soup.find('h1', 'movie_title').renderContents().encode('utf-8', 'ignore')).strip()
         return title, rating
     else:
         # Scan the result list for an exact (normalized) title match.
         for result in results('li'):
             try:
                 rating = strip_html(result.find('span', 'tMeterScore').renderContents()).strip()
                 title = strip_html(result.find('div', 'media_block_content').h3.a.renderContents()).strip()
                 if ourname == self.normalize(title):
                     return title, rating
             except AttributeError:
                 # Result item missing expected markup -- skip it.
                 pass
Beispiel #32
0
    def response(self, nick, args, kwargs):
        """List CHP traffic incidents whose location matches args[0]."""
        # Strip noise from the query, then match it case-insensitively.
        needle = re.compile(re.escape(self.clean.sub(u'', args[0])), re.I)
        page = getsoup(self.url)
        table = page.find('table', id='gvIncidents')
        matches = []
        # First row is the header -- skip it.
        for row in table('tr')[1:]:
            cells = [strip_html(cell.renderContents()) for cell in row('td')]
            _, num, when, kind, location, coord, area = cells
            if needle.search(location):
                matches.append(u'=> %s: %s (%s) %s' % (when, location, area, kind))
        if matches:
            return u'\n'.join(matches)
        return u'%s: No incidents found' % nick
Beispiel #33
0
    def response(self, nick, args, kwargs):
        """List CHP traffic incidents whose location matches args[0]."""
        query = args[0]
        # Strip noise from the query, then match it case-insensitively.
        check = self.clean.sub(u'', query)
        check = re.compile(re.escape(check), re.I)

        results = []
        page = getsoup(self.url)
        table = page.find('table', id='gvIncidents')
        # First row is the header -- skip it.
        rows = table('tr')[1:]
        for row in rows:
            _, num, time, type, loc, coord, area = [
                    strip_html(cell.renderContents())
                    for cell in row('td')
                    ]
            if check.search(loc):
                results.append(u'=> %s: %s (%s) %s' % (time, loc, area, type))
        if len(results) > 0:
            return u'\n'.join(results)
        else:
            return u'%s: No incidents found' % nick
Beispiel #34
0
 def response(self, nick, args, kwargs):
     """Report the election score box, colorizing known candidate names."""
     soup = getsoup(self.baseurl)
     out = []
     for box in soup.find('div', 'score-box').findAll('div', 'box'):
         score = []
         for key in 'name', 'score':
             val = strip_html(box.find('span', key).renderContents()).replace(u'\xa0', u'').strip()
             if key == 'name':
                 # Color-code the two known candidates.
                 if val == u'Obama':
                     color = 'blue'
                 elif val == 'Romney':
                     color = 'red'
                 else:
                     color = None
                 if color:
                     val = self.colorlib.get_color(color, text=val)
             if val:
                 score.append(val)
         if score:
             out.append(u'%s: %s' % tuple(score))
     return u'%s: %s' % (nick, u', '.join(out))
Beispiel #35
0
    def response(self, nick, args, kwargs):
        """Fetch song lyrics for args[0] from songmeanings.net.

        Uses google "lucky" to find the page; returns the lyrics prefixed
        with "[title]", or an error message formatted with kwargs.
        """
        try:
            url = self.google.lucky(u'site:songmeanings.net ' + args[0])
        except NonRedirectResponse:
            self.log.warn('no url for query {0!r} found from google lucky'.format(args[0]))
            return u'{nick}: {error}'.format(error=self.error, **kwargs)

        try:
            soup = getsoup(url)
            try:
                title = strip_html(soup.find('a', 'pw_title').renderContents()).strip()
            except StandardError:
                title = 'Unknown artist/song, check parsing code!'
            text = soup.find('div', id='textblock')
        except StandardError:
            self.log.warn('unable to find textblock from url {0!r} (query: {1!r})'.format(url, args[0]))
            return u'{nick}: {error}'.format(error=self.error, **kwargs)

        try:
            lyrics = decode(text.renderContents(), 'utf-8')
            # Title first, then non-blank lyric lines.
            return u'\n'.join(['[{}]'.format(title)] + filter(None, [line.strip() for line in strip_html(lyrics).splitlines()]))
        except StandardError:
            self.log.exception('error parsing lyrics for query: {0!r}'.format(args[0]))
            return u'{nick}: {error}'.format(error=self.error, **kwargs)
Beispiel #36
0
def get_text():
    """Return a random text from the page, with spam markers stripped."""
    candidates = getsoup(url).body.find('ul', id='texts-list')('div', 'text')
    chosen = random.choice(candidates).textarea
    raw = chosen.renderContents().decode('utf-8')
    return spam_re.sub(u'', raw)
Beispiel #37
0
 def getweather(self, location):
     """Look up the NOAA forecast for *location* as one ' / '-joined line."""
     soup = getsoup(self.noaa_search, {'inputstring': location},
                    referer=self.noaa_url)
     forecasts = soup.findAll(attrs={'class': self.fc_re})
     return u' / '.join(self.render(block) for block in forecasts)
Beispiel #38
0
 def getsoup(self, *args, **kwargs):
     """Delegate to the module-level getsoup, injecting this plugin's logger."""
     opts = dict(kwargs)
     opts['logger'] = self.log
     return getsoup(*args, **opts)
Beispiel #39
0
 def response(self, nick, args, kwargs):
     """Return the description of the first beer matching args[0]."""
     page = getsoup('http://beeradvocate.com/search', {'q': args[0], 'qt': 'beer', 'ls': 'Y', 'retired': 'N'})
     page = page.find('div', id='baContent')
     # Follow the first search hit through to the beer's own page.
     page = getsoup(urljoin('http://beeradvocate.com/', page.ul.findAll('li')[0].a['href']))
     return page.find('meta', property='og:description')['content']
Beispiel #40
0
 def random(self):
     """Fetch and parse a random urban dictionary definition."""
     return self.parse(getsoup(self.urban_random, referer=self.urban_url))
Beispiel #41
0
 def random(self):
     """Get a random definition from urban dictionary."""
     soup = getsoup(self.urban_random, referer=self.urban_url)
     return self.parse(soup)
Beispiel #42
0
 def response(self, nick, args, kwargs):
     """Look up the city/state for the zip code given in args[0]."""
     soup = getsoup(self.url, {'InData': args[0]})
     # The result table is identified only by its background color.
     city = soup.body.find('table', bgcolor='#ffffcc').a
     return u'%s: %s: %s, %s' % (nick, args[0],
                                 proper(render(city).capitalize()),
                                 proper(render(city.parent.findNext('td'))))
Beispiel #43
0
def get_text():
    """Return a random text from the page, with spam markers stripped."""
    text = random.choice(
        getsoup(url).body.find('ul', id='texts-list')('div', 'text')).textarea
    return spam_re.sub(u'', decode(text.renderContents(), 'utf-8'))
Beispiel #44
0
 def getweather(self, location):
     """Look up NOAA weather.

     Returns the rendered forecast blocks for *location* joined with ' / '.
     """
     soup = getsoup(self.noaa_search, {'inputstring': location}, referer=self.noaa_url)
     return u' / '.join(map(self.render, soup.findAll(attrs={'class': self.fc_re})))
Beispiel #45
0
 def response(self, nick, args, kwargs):
     """Look up the city/state for the zip code given in args[0]."""
     soup = getsoup(self.url, {'InData': args[0]})
     # The result table is identified only by its background color.
     city = soup.body.find('table', bgcolor='#ffffcc').a
     return u'%s: %s: %s, %s' % (
             nick, args[0], proper(render(city).capitalize()),
             proper(render(city.parent.findNext('td'))))