Example 1
def parse(bot, channel, user, args):
    # Look args up on DuckDuckGo's Instant Answer API and post the definition
    # and abstract (each with its source) plus a related link.
    try:
        results = json.load(urllib2.urlopen(ddglink + urllib2.quote(args)))
        definition = stylize.Trunicate(results['Definition'], 150) \
                .replace('\n', '').replace('\r', '').encode('utf-8')
        defisrc = stylize.Trunicate(results['DefinitionSource'], 150) \
                .replace('\n', '').replace('\r', '').encode('utf-8')
        absttext = stylize.Trunicate(results['AbstractText'], 150) \
                .replace('\n', '').replace('\r', '').encode('utf-8')
        abstsource = stylize.Trunicate(results['AbstractSource'], 150) \
                .replace('\n', '').replace('\r', '').encode('utf-8')
        url = results['RelatedTopics'][0]['FirstURL'].encode('utf-8')
    except (urllib2.URLError, ValueError, KeyError, IndexError):
        # network failure, bad JSON, or missing fields in the response
        noresults = True
    else:
        noresults = False

    if noresults:
        bot.msg(channel, 'No results for ' + args)
    else:
        bot.msg(
            channel,
            unhtml(definition) + ' (' + defisrc + u') · '.encode('utf-8') +
            url)
        bot.msg(
            channel,
            unhtml(absttext) + ' (' + abstsource + u') · '.encode('utf-8') +
            url)
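
All eight examples lean on stylize.Trunicate, a helper defined elsewhere in the bot's codebase (the misspelling of "truncate" is the identifier's actual name, so it is kept as-is). Its source isn't shown here; a minimal sketch consistent with how it is called, with an assumed default length since Example 5 calls it with one argument, could be:

def Trunicate(text, length=100):
    # Hypothetical stand-in for stylize.Trunicate: cap text at `length`
    # characters and mark the cut with an ellipsis. The default of 100 is
    # an assumption, not taken from the source.
    if len(text) <= length:
        return text
    return text[:length - 3] + '...'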
Example 2
def parse(bot, channel, user, msg):
    # For each http(s) link in the message, report the page <title>, the
    # start of plain-text content, or the content type and size.
    for m in msg.split(' '):
        if m.startswith(('http://', 'https://')):
            if getdomain(m, True) in ignoreddomains:
                continue
            try:
                r = requests.request('HEAD', m, verify=False)
            except requests.exceptions.ConnectionError:  # nonexistent websites or issues like 'Max retries exceeded'
                #bot.msg(channel, 'A connection error occurred.')
                pass
            except requests.exceptions.HTTPError:
                #bot.msg(channel, 'An HTTP error occurred.')
                pass
            except requests.exceptions.TooManyRedirects:
                #bot.msg(channel, 'Too many redirects.')
                pass
            else:
                mime = r.headers.get('content-type', '')
                if 'html' in mime.lower():
                    s = requests.request('GET', m, verify=False).text
                    BS = BeautifulSoup(s)
                    try:  # for websites without a title, such as sprunge, so they won't raise an AttributeError
                        title = BS.find('title').text.replace(
                            '\r', ' ').replace('\n',
                                               ' ').strip().encode('utf-8')
                    except AttributeError:  # no <title>: print it the same as text/plain
                        bot.msg(
                            channel,
                            stylize.Trunicate(
                                s.replace('\r', ' ').replace('\n', ' '),
                                max_title_length).encode('utf-8'))
                    else:  # print the title
                        bot.msg(channel,
                                stylize.Trunicate(title, max_title_length))
                elif 'text' in mime.lower():
                    s = requests.request('GET', m, verify=False).text
                    bot.msg(
                        channel,
                        stylize.Trunicate(
                            s.replace('\r', ' ').replace('\n', ' '),
                            max_title_length).encode('utf-8'))
                else:
                    try:
                        size = int(r.headers['content-length'])
                    except KeyError:
                        size = 0  # can't get content-length for some reason
                    bot.msg(
                        channel, 'Content-Type: ' + str(mime) + ' - ' +
                        humanize.sizefmt(size))
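
getdomain and ignoreddomains come from the surrounding module and aren't shown. Judging from the call getdomain(m, True) here and the inline host extraction in Example 5, a plausible reconstruction (the meaning of the boolean flag is an assumption) is:

def getdomain(url, strip_www=False):
    # Hypothetical sketch: extract the host part of an http(s) URL.
    domain = url.split('/')[2].split('?')[0]
    if strip_www and domain.startswith('www.'):
        domain = domain[4:]  # assumed meaning of the second argument
    return domain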
Example 3
def parse(bot, channel, user, arg):
    # Build a Go program from 'imports|body' (or just a body), compile and
    # run it via golang.org's compile service, and report the output.
    args = arg.split("|", 1)
    main, imports = arg, ""
    if len(args) == 2:
        imp = []
        for arg in args[0].split(' '):
            if arg.strip('"') != "":
                imp.append('"%s"' % arg.strip('"'))
        imports = "import(\n    %s\n);\n" % ';\n    '.join(imp)
        main = args[1]

    prog = "package main;\n%sfunc main(){\n    %s;\n}" % (imports, main.lstrip().strip(';'))
    resp = urllib2.urlopen("http://golang.org/compile", "body=%s" % urllib2.quote(prog)).read()
    jo = json.loads(resp)

    result = jo["output"]
    if jo["compile_errors"] != "":
        result = jo["compile_errors"] + "\n\n" + jo["output"]
    result = result.strip()

    if '\n' in result or '\r' in result:
        paste = urllib2.urlopen("https://nnmm.nl/", urllib2.quote(prog+"\n\n"+result)).read()
        bot.msg(channel, "%s: %s" % (user, paste))
    elif len(result) > 300:
        bot.msg(channel, stylize.Trunicate(result, 300))
        paste = urllib2.urlopen("https://nnmm.nl/", urllib2.quote(prog+"\n\n"+result)).read()
        bot.msg(channel, "%s: %s" % (user, paste))
    else:
        bot.msg(channel, result)
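
As a worked example of the string assembly above: an input of '"fmt"|fmt.Println("hi")' yields one import and one body statement, so the program posted to the compile service would be (trailing semicolons are valid Go statement terminators, so this compiles):

package main;
import(
    "fmt"
);
func main(){
    fmt.Println("hi");
}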
Example 4
def parse(bot, channel, user, args):
    # Google search: post the first result's title, URL, and snippet.
    try:
        results = json.load(urllib2.urlopen(goolink + urllib2.quote(args)))
        title = stylize.Trunicate(results['responseData']['results'][0]['titleNoFormatting'], 150) \
                .replace('\n', '').replace('\r', '').encode('utf-8')
        content = stylize.Trunicate(results['responseData']['results'][0]['content'], 250) \
                .replace('\n', '').replace('\r', '').encode('utf-8')
        url = results['responseData']['results'][0]['url'].encode('utf-8')
    except (urllib2.URLError, ValueError, KeyError, IndexError):
        # network failure, bad JSON, or no results in the response
        noresults = True
    else:
        noresults = False

    if noresults:
        bot.msg(channel, 'No results for ' + args)
    else:
        bot.msg(channel,
                unhtml(title) + u' · '.encode('utf-8') + url)
        bot.msg(channel, unhtml(strip_tags(content)))
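
unhtml is shared with Example 1 and strip_tags is another unshown helper; its name suggests it removes HTML markup from the snippet before printing. A minimal Python 2 version following the standard HTMLParser recipe would be:

from HTMLParser import HTMLParser

class _TagStripper(HTMLParser):
    # Collect only text nodes, discarding all tags.
    def __init__(self):
        HTMLParser.__init__(self)
        self.parts = []

    def handle_data(self, data):
        self.parts.append(data)

def strip_tags(html):
    # Hypothetical stand-in for the bot's strip_tags helper.
    s = _TagStripper()
    s.feed(html)
    return ''.join(s.parts)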
Example 5
def parse(bot, channel, user, msg):
    # For 4chan thread links, announce board, poster, reply/image counts,
    # subject, and a short comment preview.
    for m in msg.split(' '):
        if m.startswith(('http://', 'https://')):
            domain = m.split('/')[2].split('?')[0]
            if 'boards.4chan.org' in domain:
                try:
                    gr = re.compile(r'([a-zA-Z0-9]+)/thread/([0-9]+)').search(
                        m)
                    jo = json.loads(
                        urllib2.urlopen(apiurl + gr.group(0) +
                                        '.json').read())["posts"]
                    info = "%s - %s%s | %s %s" % (
                        stylize.SetColor(gr.group(1), stylize.Color.Green),
                        stylize.SetColor(
                            ("Anonymous" if 'name' not in jo[0] else unescape(
                                jo[0]["name"])), stylize.Color.Blue),
                        stylize.SetColor(
                            ("" if 'trip' not in jo[0] else jo[0]['trip']),
                            stylize.Color.Yellow),
                        stylize.SetColor('r:' + str(len(jo)),
                                         stylize.Color.Red),
                        stylize.SetColor('i:' + str(jo[0]["images"]),
                                         stylize.Color.Red))
                    if 'sub' in jo[0]:
                        info += " | %s" % stylize.Trunicate(
                            unescape(jo[0]["sub"]))
                    if 'com' in jo[0]:
                        comment = BeautifulSoup(
                            unescape(jo[0]["com"]).replace("<br>", "\n")).text
                        comment = ' '.join([
                            l if l[0] != '>' else stylize.SetColor(
                                l, stylize.Color.Green)
                            for l in comment.split('\n') if len(l) > 0
                        ])
                        info += " | " + stylize.Trunicate(comment, 40)
                    bot.msg(channel, info)
                except Exception as e:
                    print e
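
To illustrate the thread regex: for a link such as http://boards.4chan.org/g/thread/12345678, group(0) is the path fragment appended to apiurl and group(1) is the board letter used in the colored prefix:

import re

gr = re.compile(r'([a-zA-Z0-9]+)/thread/([0-9]+)').search(
    'http://boards.4chan.org/g/thread/12345678')
print gr.group(0)  # 'g/thread/12345678' -> fetched as <apiurl>g/thread/12345678.json
print gr.group(1)  # 'g', the board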
Example 6
def parse(bot, channel, user, args):
    # Run args as a statement in a remote Python shell session, pasting long
    # or multi-line output to nnmm.nl.
    global pykey
    if pykey == "":
        pykey = pysession()
        if pykey == "":
            bot.msg(channel, "Could not obtain a session key")
            return
    url = "%sshell.do?&statement=%s&session=%s" % (apiurl, urllib2.quote(args),
                                                   urllib2.quote(pykey))
    resp = urllib2.urlopen(url).read().rstrip()
    resp = resp.replace('\001', '')
    if '\n' in resp or '\r' in resp:
        paste = urllib2.urlopen("https://nnmm.nl/", urllib2.quote(resp)).read()
        bot.msg(channel, "%s: %s" % (user, paste))
    elif len(resp) > 300:
        bot.msg(channel, stylize.Trunicate(resp, 300))
        paste = urllib2.urlopen("https://nnmm.nl/", urllib2.quote(resp)).read()
        bot.msg(channel, "%s: %s" % (user, paste))
    else:
        bot.msg(channel, resp)
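
The "paste when multi-line or over 300 characters" policy here duplicates the tail of Example 3. A sketch of how it could be factored out, reusing the nnmm.nl paste service and the stylize helper the examples already depend on:

import urllib2

def say_or_paste(bot, channel, user, text, limit=300):
    # Hypothetical helper capturing the shared output policy of Examples 3
    # and 6: multi-line output goes to the paste service, long one-line
    # output is truncated in-channel and pasted, short output is just said.
    if '\n' in text or '\r' in text:
        paste = urllib2.urlopen("https://nnmm.nl/", urllib2.quote(text)).read()
        bot.msg(channel, "%s: %s" % (user, paste))
    elif len(text) > limit:
        bot.msg(channel, stylize.Trunicate(text, limit))  # stylize: the bot's styling module
        paste = urllib2.urlopen("https://nnmm.nl/", urllib2.quote(text)).read()
        bot.msg(channel, "%s: %s" % (user, paste))
    else:
        bot.msg(channel, text)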
Example 7
def parse(bot, channel, user, msg):
    # build the Wolfram Alpha query URL
    url = apiurl + "appid=" + wolframalphakey
    url += "&input=" + urllib2.quote(msg)

    # get the result
    raw = urllib2.urlopen(url).read()
    tree = et.fromstring(raw)

    # parse the result
    if tree.get('error') != 'false':
        bot.msg(channel, "Wolfram Alpha returned an error")
    elif int(tree.get('numpods', 0)) > 0:  # attributes are strings, so cast before comparing
        result = tree.find('pod').find('subpod').find('plaintext').text
        result = result.encode('UTF-8', 'ignore')  # plaintext may contain non-ASCII
        bot.msg(
            channel,
            "%s: %s" % (user, stylize.Trunicate(result.split('\n')[0], 300)))
    elif len(tree.findall('tips')) > 0:
        bot.msg(channel,
                "%s: %s" % (user, tree.find('tips').find('tip').get('text')))
    else:
        bot.msg(channel, "%s: I didn't find what you were looking for" % user)
Example 8
def parse(bot, channel, user, msg):
    # :np - show the caller's current or last last.fm track; ':np register'
    # links an IRC nick to a last.fm username; ':np NxN' posts a collage.
    s = msg.split()
    database = sqlite3.connect(dbfile)
    cursor = database.cursor()
    if len(s) == 0:
        fmuser = False
        for row in cursor.execute("SELECT lastfm FROM lastfm WHERE nick=?",
                                  (user, )):
            fmuser = row[0]
        if not fmuser:
            bot.msg(
                channel,
                "%s: I don't know you. Use :np register [lastfm nickname]" %
                user)
        else:
            url = apiurl.replace('{APIKEY}',
                                 lastfmkey).replace('{USER}', fmuser)
            data = json.load(urllib2.urlopen(url))
            if isinstance(data['recenttracks']['track'], list):
                artist = data['recenttracks']['track'][0]['artist'][
                    '#text'].encode('utf-8')
                track = data['recenttracks']['track'][0]['name'].encode(
                    'utf-8')
                album = data['recenttracks']['track'][0]['album'][
                    '#text'].encode('utf-8')
                np = data['recenttracks']['track'][0]['@attr']['nowplaying']
            else:
                artist = data['recenttracks']['track']['artist'][
                    '#text'].encode('utf-8')
                track = data['recenttracks']['track']['name'].encode('utf-8')
                album = data['recenttracks']['track']['album']['#text'].encode(
                    'utf-8')
                try:
                    np = data['recenttracks']['track']['@attr']['nowplaying']
                except KeyError:  # no @attr when nothing is currently playing
                    np = 'false'
            if 'true' in np:
                state = stylize.SetColor('now playing', stylize.Color.Green)
            else:
                state = stylize.SetColor('last heard', stylize.Color.Red)
            if album != '':
                bot.msg(channel, stylize.Bold(user) + ' ' + state + ' ' + \
                                 stylize.Bold(stylize.Trunicate(artist, 45)) + ' - ' + \
                                 stylize.Bold(stylize.Trunicate(track, 65)) + ' (Album: ' + \
                                 album + ')')
            else:
                bot.msg(channel, stylize.Bold(user) + ' ' + state + ' ' + \
                                 stylize.Bold(stylize.Trunicate(artist, 45)) + ' - ' + \
                                 stylize.Bold(stylize.Trunicate(track, 65)))
    elif s[0].lower() == "register":
        if len(s) != 2:
            bot.msg(channel,
                    "%s: Usage: :np register [lastfm nickname]" % user)
        else:
            nick = s[1].strip()
            url = "http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss?limit=1&format=txt" % nick
            # fetch once; a failed request is treated the same as an unknown user
            try:
                raw = urllib2.urlopen(url).read()
            except urllib2.URLError:
                raw = 'No user exists with this name.'
            if not "No user exists with this name." in raw:
                cursor.execute("DELETE FROM lastfm WHERE nick = ?", (user, ))
                cursor.execute(
                    "INSERT INTO lastfm(nick, lastfm) VALUES (?, ?)",
                    (user, nick))
                database.commit()
                bot.msg(
                    channel,
                    "Registered your username. Say :np to show what you're playing"
                )
            else:
                bot.msg(channel,
                        "No user found with the nickname \"%s\"" % nick)
    elif xbyx.match(s[0].lower()) is not None:
        fmuser = False
        for row in cursor.execute("SELECT lastfm FROM lastfm WHERE nick=?",
                                  (user, )):
            fmuser = row[0]
        if not fmuser:
            bot.msg(
                channel,
                "%s: I don't know you. Use :np register [lastfm nickname]" %
                user)
        else:
            url = collageurl % (s[0].lower(), fmuser)
            imgur = urllib2.urlopen("http://imgur.com/upload?url=%s" %
                                    url).geturl().split("/")[-1]
            bot.msg(channel, "%s: http://i.imgur.com/%s.jpg" % (user, imgur))

    cursor.close()
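
The queries above imply a two-column lastfm table mapping an IRC nick to a last.fm username. dbfile would need to be created once with something like this (column types are assumed; only the column names appear in the source):

import sqlite3

def init_db(dbfile):
    # One row per IRC nick, linking it to a last.fm username.
    db = sqlite3.connect(dbfile)
    db.execute("CREATE TABLE IF NOT EXISTS lastfm (nick TEXT, lastfm TEXT)")
    db.commit()
    db.close()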