# Example #1
def main(bot, user, target, msg):
    """Fetch the CivFanatics RSS feed and announce its newest entry.

    Args:
        bot: bot instance; provides message() and the persistent `memory` store.
        user: nick of the requesting user.
        target: channel or nick the reply is sent to.
        msg: raw command line (unused here).
    """
    feed = feedparser.parse(bot.memory['feeds']['civfanatics']['url'])
    entry = feed.entries[0]
    # xrl_encoder takes just the URL (see the other call sites in this file);
    # the original passed undefined `connection`/`event` names here, which
    # would raise NameError at runtime.
    short_url = xrl.xrl_encoder(entry.link)
    bot.message(
        user, target,
        feed.feed.title + ' :: ' + entry.title + ': ' +
        entry.summary_detail.value + ' - ' + entry.updated + ' - ' + short_url)
    # Remember the newest *entry* title, not the static channel title, so a
    # later comparison can actually detect that a new item appeared.
    bot.memory['feeds']['civfanatics']['last_title'] = entry.title
# Example #2
def autofeed(bot):
    """Poll every configured RSS feed and announce entries not yet posted.

    Iterates the module-level `feeds` mapping (feed name -> config dict with
    at least a 'url' key), parses each feed, and posts the newest entry when
    its title differs from the one remembered in bot.memory.

    Args:
        bot: bot instance; provides msg() and the persistent `memory` store.
    """
    for name in feeds:
        # The original parsed into `rss` but then attribute-accessed the loop
        # key string (`feed.feed.title`), which raises AttributeError; use the
        # parsed result throughout.
        rss = feedparser.parse(feeds[name]['url'])
        entry = rss.entries[0]
        if bot.memory['feeds'][name]['last_title'] == entry.title:
            continue  # nothing new since the last announcement
        # NOTE(review): first argument looks like it should be a channel/nick
        # target rather than the whole config dict — confirm against bot.msg().
        bot.msg(
            feeds[name],
            rss.feed.title + ' :: ' + entry.title + ': ' +
            entry.summary_detail.value + ' - ' + entry.updated + ' - ' +
            xrl.xrl_encoder(entry.link))
        # Write back to bot.memory — the store the comparison above reads;
        # the original wrote only to feeds[name], so the check never matched.
        bot.memory['feeds'][name]['last_title'] = entry.title
        feeds[name]['last_title'] = entry.title
# Example #3
def main(bot, user, target, msg):
    """Handle the CivFanatics command: usage help, site status, or forum search.

    msg forms:
        '<cmd>'          -> reply with this module's docstring
        '<cmd> online'   -> check whether www.civfanatics.com responds
        '<cmd> <query>'  -> Google site-search of the CivFanatics forums

    Args:
        bot: bot instance providing message()/msg().
        user: requesting user's nick.
        target: channel or nick to reply to.
        msg: full command line as received.
    """
    words = msg.split()
    if len(words) == 1:
        bot.message(user, target, __doc__)
        return

    # '==' instead of 'is': identity comparison against int/str literals is
    # implementation-dependent and made this branch effectively unreachable
    # (a .lower() result is never the same object as the 'online' literal).
    if len(words) == 2 and words[1].lower() == 'online':
        url = 'http://www.civfanatics.com'
        html = webbot.bot(url)
        # Heuristic: a real page has plenty of words; an error page is short.
        # Pass `target` like the other bot.msg() calls in this module — the
        # original omitted it.
        if len(html.split()) > 50:
            bot.msg(target, 'The website appears to be online.')
        else:
            bot.msg(target, 'From my point of view the website seems to be down.')
        return

    if len(words) > 1:
        search_string = msg.split(' ', 1)[1]
        url = ('http://www.google.com/custom?q=' +
               search_string.replace(' ', '+') +
               '&domains=forums.civfanatics.com&btnG=Search'
               '&sitesearch=forums.civfanatics.com')
        html = webbot.bot(url)
        try:
            # Scrape the result count and the first hit out of Google's HTML.
            hits = re.search(
                'Results <b>1</b> - <b>10</b> of about <b>(?P<hit>.+?)</b> from',
                html)
            results = hits.group('hit') + ' Results'
            entry = re.search('<div class=g(>| ).*?\x22.*?</div>', html)
            link = re.search(
                '(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&amp;:/~\+#]*[\w\-\@?^=%&amp;/~\+#])?',
                entry.group())
            test = re.search('class=l>(?P<title>.+?)</a>', entry.group())
            title = test.group('title')
            test = re.search('<div class=std>(?P<tag>.+?)<br>', entry.group())
            tag = test.group('tag')
            results = url_encode(results + ' | ' + title + ': ' +
                                 link.group() + ' - ' + tag)
            bot.message(user, target, results)
            bot.message(
                user, target, 'Results: ' +
                xrl.xrl_encoder('%sq=%s' %
                                (url, search_string.replace(' ', '+'))))
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate; any scraping failure (e.g. a re.search that
            # returned None) falls through to the dump-file report below.
            message = 'There has been an error in the search processor.  A report file called google_dump.html has been created.  Please upload it to the issue tracker at: http://code.google.com/p/eyercbot/issues/'
            # `with` guarantees the dump file is closed; the original leaked
            # the file handle.
            with open('google_dump.html', 'w') as dump_file:
                dump_file.write('<!-- Command: ' + msg + ' Search string: ' +
                                search_string + ' -->\n' + html)
            bot.msg(target, message)
            # Single-argument print(...) is valid in both Python 2 and 3;
            # the original `print message` statement is Python-2-only.
            print(message)