Example #1
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     html = geturl2(self.show_url(bugId))
     data = {}
     stat = ''
     for line in html.splitlines():
         line = line.strip()
         if not line:
             continue
         elif '<td headers="category">' in line:
             stat = 'category'
         elif '<td headers="status">' in line:
             stat = 'status'
         elif '<td headers="assignedto">' in line:
             stat = 'assigned to'
         elif '<td headers="os">' in line:
             data['os'] = striphtml(line).strip()
         elif '<td headers="severity">' in line:
             data['severity'] = striphtml(line).strip()
         elif '<td headers="priority">' in line:
             data['priority'] = striphtml(line).strip()
         elif '<td headers="reportedver">' in line:
             data['version'] = striphtml(line).strip()
         elif '<h2 class="summary' in line:
             stat = 'summary'
         elif '<a href="#comments">Comments (' in line:
             data['comments'] = line.split('(', 1)[1].split(')')[0]
         # stats
         elif stat:
             if stat in ['category', 'status', 'assigned to', 'summary']:
                 data[stat] = striphtml(line).strip()
             stat = ''
     return data
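These examples lean on a small striphtml helper plus thin URL fetchers (geturl / geturl2) defined elsewhere in the bot and not shown on this page. A minimal sketch of what they are assumed to look like; the names come from the calls above, the bodies are guesses:

import re
import urllib2

def striphtml(txt):
    """ assumed behaviour: drop anything that looks like an html tag """
    return re.sub(r'<[^>]*>', '', txt)

def geturl2(url):
    """ assumed behaviour: fetch a url and return the page body as a string """
    return urllib2.urlopen(urllib2.Request(url)).read()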
Example #2
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     html = geturl2(self.show_url(bugId))
     data = {}
     stat = ''
     for line in html.splitlines():
         line = line.strip()
         if not line:
             continue
         elif '<td headers="category">' in line:
             stat = 'category'
         elif '<td headers="status">' in line:
             stat = 'status'
         elif '<td headers="assignedto">' in line:
             stat = 'assigned to'
         elif '<td headers="os">' in line:
             data['os'] = striphtml(line).strip()
         elif '<td headers="severity">' in line:
             data['severity'] = striphtml(line).strip()
         elif '<td headers="priority">' in line:
             data['priority'] = striphtml(line).strip()
         elif '<td headers="reportedver">' in line:
             data['version'] = striphtml(line).strip()
         elif '<h2 class="summary' in line:
             stat = 'summary'
         elif '<a href="#comments">Comments (' in line:
             data['comments'] = line.split('(', 1)[1].split(')')[0]
         # stats
         elif stat:
             if stat in ['category', 'status', 'assigned to', 'summary']:
                 data[stat] = striphtml(line).strip()
             stat = ''
     return data
Example #3
 def makeresponse(self, name, res, channel, sep="\002||\002"):
     # loop over result to make a response
     result = ""
     itemslist = self.itemslists[(name, channel)]
     if not itemslist:
         rssitem = self.byname(name)
         if not rssitem:
             return "no %s rss item" % name
         else:
             self.itemslists.extend((name, channel), rssitem.itemslist)
             self.itemslists.save()
     for j in res:
         resultstr = ""
         for i in self.itemslists[(name, channel)]:
             try:
                 item = unicode(j[i])
                 if not item:
                     continue
                 if item.startswith("http://"):
                     resultstr += "<%s> - " % item
                 else:
                     resultstr += "%s - " % striphtml(item)
             except KeyError:
                 continue
         resultstr = resultstr[:-3]
         if resultstr:
             result += "%s %s " % (resultstr, sep)
     return result[:-6]
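The trailing slices are tied to the separators: every field appends "value - " (so [:-3] trims the last " - "), and every entry appends a space, the 4-character "\002||\002" separator and another space (so [:-6] trims that tail). A standalone sketch of the same assembly, with plain dicts standing in for the plugin's itemslists store:

def makeresponse_sketch(entries, fields, sep="\002||\002"):
    result = ""
    for entry in entries:
        resultstr = ""
        for field in fields:
            value = entry.get(field, "")
            if not value:
                continue
            if value.startswith("http://"):
                resultstr += "<%s> - " % value
            else:
                resultstr += "%s - " % value
        resultstr = resultstr[:-3]              # drop the trailing " - "
        if resultstr:
            result += "%s %s " % (resultstr, sep)
    return result[:-6]                          # drop " " + 4-char sep + " "

print makeresponse_sketch([{'title': 'release 0.9', 'link': 'http://example.org/0.9'}],
                          ['title', 'link'])
# prints: release 0.9 - <http://example.org/0.9>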
Example #4
    def doscan(self, scanlist):
        for botname, channel, name, node in scanlist:            
            try:
                result = geturl('http://%s/7.html' % node)
            except Exception, ex:
                rlog(10, 'shoutcast',
                     "can't get %s shoutcast data: %s" % (node, str(ex)))
                continue
            try:
                res = result.split(',')[6]
            except IndexError:
                rlog(10, 'shoutcast', "can't match %s shoutcast data" % node)
                continue
            song = striphtml(res).strip().replace('\n', '')
            bot = fleet.byname(botname)
            if bot and channel in bot.state['joinedchannels']:
                got = False
                for ttime, played in self.songsplayed:
                    if played == song:
                        got = True
                if not got:
                    self.songsplayed.append((time.time(), song))
                    bot.say(channel, "now playing on %s: %s" % (name, song))
                else:
                    for ttime, played in self.songsplayed:
                        if time.time() - ttime > 1800:
                            self.songsplayed.remove((ttime, played))
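A Shoutcast server's /7.html status page returns one comma-separated line whose seventh field is the current song title, which is why the scanner takes index 6. A tiny illustration with a made-up status line:

status = '362,1,1045,10000,324,128,DJ Example - Some Track'
print status.split(',')[6]    # DJ Example - Some Track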
Example #5
 def makeresponse(self, name, res, channel, sep="\002||\002"):
     # loop over result to make a response
     result = ""
     itemslist = self.itemslists[(name, channel)]
     if not itemslist:
         rssitem = self.byname(name)
         if not rssitem:
             return "no %s rss item" % name
         else:
             self.itemslists.extend((name, channel), rssitem.itemslist)
             self.itemslists.save()
     for j in res:
         resultstr = ""
         for i in self.itemslists[(name, channel)]:
             try:
                 item = unicode(j[i])
                 if not item:
                     continue
                 if item.startswith('http://'):
                     resultstr += "<%s> - " % item
                 else:
                     resultstr += "%s - " % striphtml(item)
             except KeyError:
                 continue
         resultstr = resultstr[:-3]
         if resultstr:
             result += "%s %s " % (resultstr, sep)
     return result[:-6]
Example #6
def handle_wowwiki(bot, ievent):
    """ wikipedia <what> .. search wikipedia for <what> """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    what = ""
    lang = 'en'
    for i in ievent.rest.split():
        first = i[0].upper()
        rest = i[1:]
        if i.startswith('-'):
            if len(i) != 3:
                ievent.reply('invalid option')
                return
            lang = i[1:]
            continue
        what += "%s%s " % (first, rest)
    what = what.strip().replace(' ', '_')
    url = 'http://wowwiki.com/wiki/Special:Export/%s' % quote(what.encode('utf-8'))
    url2 = 'http://wowwiki.com/wiki/%s' % quote(what.encode('utf-8'))
    txt = getwikidata(url, ievent)
    if not txt:
        return
    if '#REDIRECT' in txt or '#redirect' in txt:
        redir = ' '.join(txt.split()[1:])
        url = 'http://wowwiki.com/wiki/Special:Export/%s' % quote(redir.encode('utf-8'))
        url2 = 'http://wowwiki.com/wiki/%s' % quote(redir.encode('utf-8'))
        txt = getwikidata(url, ievent)
    if not txt:
        return
    res = ['%s ===> ' % url2, ]
    res += splittxt(striphtml(txt).strip())
    ievent.reply(res)
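getwikidata and splittxt are plugin helpers not shown on this page; splittxt presumably chops a long wiki extract into pieces that fit in a single IRC message. A rough sketch of that idea (the name suffix and chunk size are made up):

def splittxt_sketch(txt, size=400):
    """ hypothetical: split a long reply into message-sized chunks """
    chunks = []
    while len(txt) > size:
        cut = txt.rfind(' ', 0, size)
        if cut == -1:
            cut = size
        chunks.append(txt[:cut])
        txt = txt[cut:].lstrip()
    if txt:
        chunks.append(txt)
    return chunks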
Example #7
def handle_wikipedia(bot, ievent):
    """ wikipedia <what> .. search wikipedia for <what> """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    res = searchwiki(ievent.rest)
    if not res:
        ievent.reply('no result found')
        return
    txt, url = res
    res = ['%s ===> ' % url, ]
    res += splittxt(striphtml(txt).strip())
    ievent.reply(res)
Example #8
def handle_mash(bot, ievent):
    if not ievent.rest:
        ievent.missing('<what>')
        return
    data = geturl(baseurl + '+'.join(ievent.rest.split()))
    try:
        results = loads(data)
    except ValueError:
        ievent.reply("can't make results of %s" % data)
        return
    res = []
    for result in results['results']:
        res.append('%s: - <%s>' % (striphtml(result['title']), result['url']))
    ievent.reply('results for %s: ' % ievent.rest, res, dot=' || ')
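loads is a JSON decoder (json.loads or simplejson.loads), and the key accesses imply a payload shaped like {"results": [{"title": ..., "url": ...}, ...]}. A quick check of the formatting step against a canned payload, with re.sub standing in for striphtml:

import re
from json import loads

canned = '{"results": [{"title": "<b>hello</b>", "url": "http://example.org"}]}'
for result in loads(canned)['results']:
    print '%s: - <%s>' % (re.sub('<[^>]*>', '', result['title']), result['url'])
# prints: hello: - <http://example.org>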
Example #9
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     html = geturl2(self.show_url(bugId))
     if 'APPLICATION ERROR #1100' in html:
         raise BugTrackerNotFound('issue not found')
     data = {'notes': 0}
     stat = ''
     skip = 0
     for line in html.splitlines():
         line = line.strip().replace('\t', '')
         if skip > 0:
             skip -= 1
             continue
         elif not line:
             continue
         elif '<!-- Category -->' in line:
             skip = 1
             stat = 'category'
         elif '<!-- Severity -->' in line:
             skip = 1
             stat = 'severity'
         elif '<!-- Reproducibility -->' in line:
             skip = 1
             stat = 'reproducibility'
         elif '<!-- Reporter -->' in line:
             skip = 3
             stat = 'reporter'
         elif '<!-- Priority -->' in line:
             skip = 1
             stat = 'priority'
         elif '<!-- Resolution -->' in line:
             skip = 1
             stat = 'resolution'
         elif '<!-- Status -->' in line:
             skip = 3
             stat = 'status'
         elif '<!-- Summary -->' in line:
             skip = 4
             stat = 'summary'
         elif '<td class="bugnote-public">' in line:
             data['notes'] += 1
         # stats
         elif stat:
             if stat in [
                     'category', 'severity', 'reproducibility', 'reporter',
                     'priority', 'resolution', 'status', 'summary'
             ]:
                 data[stat] = striphtml(line)
             stat = ''
     return data
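In the Mantis page layout scraped here, each field's value sits a fixed number of lines below its <!-- ... --> marker, hence the per-field skip counts that run down before stat is consumed. A toy run of the same skip/stat state machine on a fabricated fragment:

lines = ['<!-- Category -->', '<td>', 'backend',
         '<!-- Priority -->', '<td>', 'high']
data, stat, skip = {}, '', 0
for line in lines:
    if skip > 0:
        skip -= 1
    elif line.startswith('<!--'):
        stat = line.strip('<!- >').lower()    # '<!-- Category -->' -> 'category'
        skip = 1                              # jump over the cell markup
    elif stat:
        data[stat] = line
        stat = ''
assert data == {'category': 'backend', 'priority': 'high'}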
Example #10
def handle_mash(bot, ievent):
    if not ievent.rest:
        ievent.missing('<what>')
        return
    data = geturl(baseurl + '+'.join(ievent.rest.split()))
    try:
        results = loads(data)
    except ValueError:
        ievent.reply("can't make results of %s" % data)
        return 
    res = []
    for result in results['results']:
        res.append('%s: - <%s>' % (striphtml(result['title']), result['url']))
    ievent.reply('results for %s: ' % ievent.rest, res, dot=' || ')
Example #11
def fetch(server, qid='random'):
    html = geturl2('http://%s/%s' % (server, qid))
    text = ''
    keep = False
    for line in html.splitlines():
        if len(line.split('</p>')) == 3:
            return striphtml(line.split('</p>')[1])
        elif line.startswith('<p class="quote">'):
            if '<p class="qt">' in line:
                if line.endswith('</p>'):
                    return striphtml(re_p.findall(line)[0])
                else:
                    text = line.split('<p class="qt">')[1]
                    keep = True
        elif keep:
            if '</p>' in line:
                text = text + line.split('</p>')[0]
                return striphtml(text.replace('<br />', ' '))
            else:
                text = text + line
    if text:
        return striphtml(text.replace('<br />', ' '))
    else:
        return 'no result'
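re_p is a module-level regular expression that this excerpt does not show; judging from how it is used, it is something like the pattern below, which captures the body of a single-line <p class="qt"> quote. The host in the usage lines is only a placeholder for whatever qdb-style quote site the plugin points at:

import re

re_p = re.compile('<p class="qt">(.*?)</p>')    # guessed definition

print fetch('qdb.example.org')                  # a random quote
print fetch('qdb.example.org', '4281')          # a specific quote id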
Example #12
def fetch(server, qid='random'):
    html = geturl2('http://%s/%s' % (server, qid))
    text = ''
    keep = False
    for line in html.splitlines():
        if len(line.split('</p>')) == 3:
            return striphtml(line.split('</p>')[1])
        elif line.startswith('<p class="quote">'):
            if '<p class="qt">' in line:
                if line.endswith('</p>'):
                    return striphtml(re_p.findall(line)[0])
                else:
                    text = line.split('<p class="qt">')[1]
                    keep = True
        elif keep:
            if '</p>' in line:
                text = text + line.split('</p>')[0]
                return striphtml(text.replace('<br />', ' '))
            else:
                text = text + line
    if text:
        return striphtml(text.replace('<br />', ' '))
    else:
        return 'no result'
Example #13
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     html = geturl2(self.show_url(bugId))
     if 'APPLICATION ERROR #1100' in html:
         raise BugTrackerNotFound('issue not found')
     data = {'notes': 0}
     stat = ''
     skip = 0
     for line in html.splitlines():
         line = line.strip().replace('\t', '')
         if skip > 0:
             skip -= 1
             continue
         elif not line:
             continue
         elif '<!-- Category -->' in line:
             skip = 1
             stat = 'category'
         elif '<!-- Severity -->' in line:
             skip = 1
             stat = 'severity'
         elif '<!-- Reproducibility -->' in line:
             skip = 1
             stat = 'reproducibility'
         elif '<!-- Reporter -->' in line:
             skip = 3
             stat = 'reporter'
         elif '<!-- Priority -->' in line:
             skip = 1
             stat = 'priority'
         elif '<!-- Resolution -->' in line:
             skip = 1
             stat = 'resolution'
         elif '<!-- Status -->' in line:
             skip = 3
             stat = 'status'
         elif '<!-- Summary -->' in line:
             skip = 4
             stat = 'summary'
         elif '<td class="bugnote-public">' in line:
             data['notes'] += 1
         # stats
         elif stat:
             if stat in ['category', 'severity', 'reproducibility', 'reporter',
                 'priority', 'resolution', 'status', 'summary']:
                 data[stat] = striphtml(line)
             stat = ''
     return data
Example #14
def markovlearnurl(url):
    """ learn an url """
    lines = 0
    rlog(10, 'markov', 'learning %s' % url)
    try:
        f = geturl(url)
        for line in f.split('\n'):
            line = striphtml(line)
            if lines % 10 == 0:
                time.sleep(0.01)
            line = line.strip()
            if not line:
                continue
            markovtalk_learn(line)
            lines += 1
    except Exception, e:
        rlog(10, 'markov', str(e))
Example #15
def markovlearnurl(url):
    """ learn an url """
    lines = 0
    rlog(10, 'markov', 'learning %s' % url)
    try:
        f = geturl(url)
        for line in f.split('\n'):
            line = striphtml(line)
            if lines % 10 == 0:
                time.sleep(0.01)
            line = line.strip()
            if not line:
                continue
            markovtalk_learn(line)
            lines += 1
    except Exception, e:
        rlog(10, 'markov', str(e))
Example #16
 def comments(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     bugrss = geturl(self.comments_url(bugId))
     bugdom = xml.dom.minidom.parseString(bugrss)
     bugall = bugdom.getElementsByTagName('item')
     comments = []
     if bugall:
         for item in bugall:
             title = item.getElementsByTagName('title')[0].firstChild.nodeValue
             if 'comment added' in title:
                 try:
                     author = item.getElementsByTagName('dc:creator')[0].firstChild.nodeValue
                 except IndexError:
                     author = 'anonymous'
                 comment = item.getElementsByTagName('description')[0].firstChild.nodeValue
                 comment = striphtml(comment.replace('\n', ' ')).strip()
                 while '  ' in comment:
                     comment = comment.replace('  ', ' ')
                 comments.append('%s: %s' % (author, comment))
     return comments    
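dc:creator is looked up by its qualified tag name, which works because minidom keeps the prefix in tagName. A self-contained sketch of the same lookups on a hand-made feed fragment, with re.sub standing in for striphtml:

import re
import xml.dom.minidom

rss = ('<rss xmlns:dc="http://purl.org/dc/elements/1.1/"><channel><item>'
       '<title>comment added</title>'
       '<dc:creator>alice</dc:creator>'
       '<description>&lt;p&gt;fix   confirmed&lt;/p&gt;</description>'
       '</item></channel></rss>')
dom = xml.dom.minidom.parseString(rss)
for item in dom.getElementsByTagName('item'):
    author = item.getElementsByTagName('dc:creator')[0].firstChild.nodeValue
    comment = item.getElementsByTagName('description')[0].firstChild.nodeValue
    comment = re.sub('<[^>]*>', '', comment.replace('\n', ' ')).strip()
    while '  ' in comment:
        comment = comment.replace('  ', ' ')
    print '%s: %s' % (author, comment)    # alice: fix confirmed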
Example #17
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to ba a number"
     html = geturl2(self.show_url(bugId)).splitlines()
     data = {}
     stat = ''
     for line in html:
         line = line.strip()
         if not line:
             continue
         if line.startswith('<span class=h3 >'):
             data['summary'] = striphtml(line)
         elif line.startswith('<tr><th align=left>Status:'):
             stat = 'status'
         elif line.startswith('class=label><b>Type-</b>'):
             data['type'] = striphtml(line.split('</b>')[1])
         elif line.startswith('class=label><b>Priority-</b>'):
             data['priority'] = striphtml(line.split('</b>')[1])
         elif line.startswith(
                 '<span class=author>') and not data.has_key('author'):
             stat = 'author'
         elif line.startswith('<tr><th align=left>Owner:'):
             stat = 'owner'
         elif line.startswith('<span class="date" title="'):
             data['date'] = striphtml(line)
         elif striphtml(line) == '':
             pass
         # stats
         elif stat == 'author':
             data['reporter'] = striphtml(line)
             stat = ''
         elif stat == 'owner':
             data['owner'] = striphtml(line)
             stat = ''
         elif stat == 'status':
             data['status'] = striphtml(line)
             stat = ''
     return data
Example #18
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to ba a number"
     html = geturl2(self.show_url(bugId)).splitlines()
     data = {}
     stat = ''
     for line in html:
         line = line.strip()
         if not line:
             continue
         if line.startswith('<span class=h3 >'):
             data['summary'] = striphtml(line)
         elif line.startswith('<tr><th align=left>Status:'):
             stat = 'status'
         elif line.startswith('class=label><b>Type-</b>'):
             data['type'] = striphtml(line.split('</b>')[1])
         elif line.startswith('class=label><b>Priority-</b>'):
             data['priority'] = striphtml(line.split('</b>')[1])
         elif line.startswith('<span class=author>') and not data.has_key('author'):
             stat = 'author'
         elif line.startswith('<tr><th align=left>Owner:'):
             stat = 'owner'
         elif line.startswith('<span class="date" title="'):
             data['date'] = striphtml(line)
         elif striphtml(line) == '':
             pass
         # stats
         elif stat == 'author':
             data['reporter'] = striphtml(line)
             stat = ''
         elif stat == 'owner':
             data['owner'] = striphtml(line)
             stat = ''
         elif stat == 'status':
             data['status'] = striphtml(line)
             stat = ''
     return data
Example #19
def handle_wikiquote(bot, ievent):
    """ wikiquote <what> .. search wikiquote for <what> """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    what = ""
    lang = 'en'
    for i in ievent.rest.split():
        first = i[0].upper()
        rest = i[1:]
        if i.startswith('-'):
            if len(i) != 3:
                ievent.reply('invalid option')
                return
            lang = i[1:]
            continue
        what += "%s%s " % (first, rest)
    what = what.strip().replace(' ', '_')
    url = 'http://%s.wikiquote.org/w/wiki.phtml?title=%s' % (lang, what)
    result = geturl(url)
    if not result:
        ievent.reply("can't find data for %s" % what)
        return
    result = result.replace('\n', ' ')
    result = re.sub('\s+', ' ', result)
    regresult = re.search(wikire, result)
    if not regresult:
        ievent.reply("can't match regular expression %s" % url)
        return
    txt = regresult.groups()[0]
    try:
        res = re.sub(
            '\[.*?\]', '',
            striphtml(random.choice(re.findall('<li>(.*?)</li>', txt))))
    except IndexError:
        ievent.reply("can't find quote")
        return
    ievent.reply(res)
Example #20
 def comments(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     bugrss = geturl(self.comments_url(bugId))
     bugdom = xml.dom.minidom.parseString(bugrss)
     bugall = bugdom.getElementsByTagName('item')
     comments = []
     if bugall:
         for item in bugall:
             title = item.getElementsByTagName(
                 'title')[0].firstChild.nodeValue
             if 'comment added' in title:
                 try:
                     author = item.getElementsByTagName(
                         'dc:creator')[0].firstChild.nodeValue
                 except IndexError:
                     author = 'anonymous'
                 comment = item.getElementsByTagName(
                     'description')[0].firstChild.nodeValue
                 comment = striphtml(comment.replace('\n', ' ')).strip()
                 while '  ' in comment:
                     comment = comment.replace('  ', ' ')
                 comments.append('%s: %s' % (author, comment))
     return comments
Example #21
def handle_wowwiki(bot, ievent):
    """ wikipedia <what> .. search wikipedia for <what> """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    what = ""
    lang = 'en'
    for i in ievent.rest.split():
        first = i[0].upper()
        rest = i[1:]
        if i.startswith('-'):
            if len(i) != 3:
                ievent.reply('invalid option')
                return
            lang = i[1:]
            continue
        what += "%s%s " % (first, rest)
    what = what.strip().replace(' ', '_')
    url = 'http://wowwiki.com/wiki/Special:Export/%s' % quote(
        what.encode('utf-8'))
    url2 = 'http://wowwiki.com/wiki/%s' % quote(what.encode('utf-8'))
    txt = getwikidata(url, ievent)
    if not txt:
        return
    if '#REDIRECT' in txt or '#redirect' in txt:
        redir = ' '.join(txt.split()[1:])
        url = 'http://wowwiki.com/wiki/Special:Export/%s' % quote(
            redir.encode('utf-8'))
        url2 = 'http://wowwiki.com/wiki/%s' % quote(redir.encode('utf-8'))
        txt = getwikidata(url, ievent)
    if not txt:
        return
    res = [
        '%s ===> ' % url2,
    ]
    res += splittxt(striphtml(txt).strip())
    ievent.reply(res)
Example #22
def handle_wikiquote(bot, ievent):
    """ wikiquote <what> .. search wikiquote for <what> """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    what = ""
    lang = 'en'
    for i in ievent.rest.split():
        first = i[0].upper()
        rest = i[1:]
        if i.startswith('-'):
            if len(i) != 3:
                ievent.reply('invalid option')
                return
            lang = i[1:]
            continue
        what += "%s%s " % (first, rest)
    what = what.strip().replace(' ', '_')
    url = 'http://%s.wikiquote.org/w/wiki.phtml?title=%s' % (lang, what)
    result = geturl(url)
    if not result:
        ievent.reply("can't find data for %s" % what)
        return
    result = result.replace('\n', ' ')
    result = re.sub('\s+', ' ', result)
    regresult = re.search(wikire, result)
    if not regresult:
        ievent.reply("can't match regular expression %s" % url)
        return
    txt = regresult.groups()[0]
    try:
        res = re.sub('\[.*?\]', '', striphtml(random.choice(re.findall('<li>(.*?)</li>',txt))))
    except IndexError:
        ievent.reply("can't find quote")
        return
    ievent.reply(res)
Example #23
    postdata = urllib.urlencode(postarray)
    req = urllib2.Request(url='http://tinyurl.com/create.php', data=postdata)
    req.add_header('User-agent', useragent())
    try:
        res = urllib2.urlopen(req).readlines()
        #raise Exception("mekker")
    except urllib2.HTTPError, e:
        # HTTPError is a subclass of URLError, so it has to be caught first
        rlog(10, 'tinyurl', 'HTTP error: %s' % str(e))
        return
    except urllib2.URLError, e:
        rlog(10, 'tinyurl', 'URLError: %s' % str(e))
        return
    urls = []
    for line in res:
        if line.startswith('<blockquote><b>'):
            urls.append(striphtml(line.strip()).split('[Open')[0])
    if len(urls) == 3:
        urls.pop(0)
    return urls


def handle_tinyurl(bot, ievent):
    """ get tinyurl from argument or last url in log """
    if not ievent.rest and (not urlcache.has_key(bot.name) or
                            not urlcache[bot.name].has_key(ievent.target)):
        ievent.missing('<url>')
        return
    elif not ievent.rest:
        url = urlcache[bot.name][ievent.target]
    else:
        url = ievent.rest
Example #24
    postdata = urllib.urlencode(postarray)
    req = urllib2.Request(url='http://tinyurl.com/create.php', data=postdata)
    req.add_header('User-agent', useragent())
    try:
        res = urllib2.urlopen(req).readlines()
        #raise Exception("mekker")
    except urllib2.HTTPError, e:
        # HTTPError is a subclass of URLError, so it has to be caught first
        rlog(10, 'tinyurl', 'HTTP error: %s' % str(e))
        return
    except urllib2.URLError, e:
        rlog(10, 'tinyurl', 'URLError: %s' % str(e))
        return
    urls = []
    for line in res:
        if line.startswith('<blockquote><b>'):
            urls.append(striphtml(line.strip()).split('[Open')[0])
    if len(urls) == 3:
        urls.pop(0)
    return urls

def handle_tinyurl(bot, ievent):
    """ get tinyurl from argument or last url in log """
    if not ievent.rest and (not urlcache.has_key(bot.name) or
                            not urlcache[bot.name].has_key(ievent.target)):
        ievent.missing('<url>')
        return
    elif not ievent.rest:
        url = urlcache[bot.name][ievent.target]
    else:
        url = ievent.rest
    url = valid_url(url)
Example #25
def handle_vandale(bot, ievent):
    try:
        woord = ievent.args[0]
        if "%" in woord:
            woord = woord.replace("%", "%25")
    except IndexError:
        ievent.missing('<woord>')
        return
    try:
        f = urllib.urlopen(url % woord)
    except:
        ievent.reply('connection failed')
        return
    data = f.read()
    if "Geen resultaat." in data:
        ievent.reply('not found')
        return
    results = []
    pos = 0
    while True:
        # earlier markup used "<BIG>" and "<table class=" as the start marker
        spos = data[pos:].find('<span class="pnn4_k">')
        if spos == -1:
            break
        # earlier markup used "</td></tr>" and "</td></tr></table></div>" as the end marker
        epos = data[pos+spos:].find("</div></div>")
        if epos == -1:
            break
        rstr = data[pos+spos:pos+spos+epos]
        # convert special chars and isohtmlcodes
        for k, v in IsoHtmlDict.items():
            if v in rstr:
                rstr = rstr.replace(v, k)
        for k, v in SpecialCharsMap.items():
            if k in rstr:
                rstr = rstr.replace(k, v)
        # stress
        if not bot.jabber:
            stress = re_stress.split(rstr)
            if len(stress) == 3:
                rstr = "%s\037%s\037%s" % (stress[0], stress[1], stress[2])
        # strip html chars
        rstr = striphtml(rstr)
        # count
        if rstr.split(" ", 1)[0][-1] in "0123456789":
            w, r = rstr.split(" ", 1)
            rstr = "%s (%s) %s" % (w[:-1], w[-1], r)
        # bold
        brac = rstr.find(")1")
        if brac != -1:
            rstr = "%s .. %s" % (rstr[:brac+1], rstr[brac+1:])
        results.append(rstr)
        pos = pos + spos + epos
    if results:
        ievent.reply(url % woord + ' ==>' + ' | '.join(results))
        return
    else:
        ievent.reply('not found')
        return
Example #26
def handle_sc(bot, ievent):
    """ sc <host:port> .. ask server:port for currently running song """
    try:
        server = ievent.args[0]
    except IndexError:
        ievent.missing('<server>')
        return
    try:
        result = geturl('http://%s/7.html' % server)
    except Exception, ex:
        ievent.reply("can't get shoutcast data: %s" % str(ex))
        return
    try:
        res = result.split(',')[6]
    except IndexError:
        ievent.reply("can't extract shoutcast data")
        return
    ievent.reply(striphtml(res).strip())

cmnds.add('sc', handle_sc, 'USER')
examples.add('sc', 'sc <host:port> .. ask server:port for currently running song',
             'sc stream1.jungletrain.net:8000')

def handle_sclist(bot, ievent):
    ievent.reply("shoutcast nodes: %s" % cfg.get('nodes'))
    
cmnds.add('sc-list', handle_sclist, 'OPER')
examples.add('sc-list', 'show list of watched shoutcast servers', 'sc-list')

def handle_scadd(bot, ievent):
    try:
        name, node, polltime = ievent.args
    except ValueError: