def search(args, nick):
    url = "http://www.urbandictionary.com/define.php?term=" + common.quote(args)
    content = common.read_url(url)
    match = re.search(r'<div class="definition">(.+?)</div>.*?'
                      r'<div class="example">(.+?)</div>', content)
    try:
        definition = match.group(1)
        example = match.group(2)
    except AttributeError:
        result = "Inga träffar. ({0})".format(url)
    else:
        def fixhtml(s):
            s = re.sub(r'(<br ?/>)+', r' ', s)
            s = re.sub('\n', r' ', s)
            s = re.sub('\r', r' ', s)
            s = re.sub(r'(<.+?>)+', r'', s)
            return common.unescape_html(s)
        # magic constants used when calculating space_left:
        # * 450 -- the smallest length of an irc message (hopefully; depends on server.)
        # * 20 -- the longest channel name including # (hopefully; i have no idea)
        # * 16 -- the length of the static crap in the result string (definitely)
        space_left = (450 - len('PRIVMSG :') - 20 - len(nick + ': ')
                      - len(url) - 16)
        definition = common.truncate(fixhtml(definition), int(space_left / 2))
        example = common.truncate(fixhtml(example), int(space_left / 2))
        result = '{0} (Exempel: {1}) -- {2}'.format(definition, example, url)
    return result
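# Usage sketch for the Urban Dictionary lookup above. It assumes the bot's
# `common` helper module (quote, read_url, truncate, unescape_html) and `re`
# are imported at the top of the plugin file; the nick and term below are made
# up and the output is illustrative, not recorded:
#
#     >>> search('yak shaving', 'someNick')
#     '<definition> (Exempel: <example>) -- http://www.urbandictionary.com/define.php?term=yak%20shaving'
#
# When the regex finds no definition, 'Inga träffar. (<url>)' is returned instead.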
def query(args):
    try:
        appid = common.read_lineconf(common.read_file('wolframalpha-api-key'))[0]
    except (IOError, IndexError):
        raise IOError('No appid for WolframAlpha™ was found.')
    m = re.search(r'^(.*?)( \| (.*?))?$', args)
    expr = m.group(1)
    xml = common.read_url('http://api.wolframalpha.com/v2/query?appid={0}&input='.format(appid), expr)
    output_type = 'approx' if not m.group(3) else m.group(3)
    root = ET.fromstring(xml)
    didyoumeans = root.find('didyoumeans')
    # Element truthiness depends on child count, so compare against None explicitly.
    if didyoumeans is not None:
        return 'Did you mean: \'{0}\'?'.format(didyoumeans.find('didyoumean').text)
    title = {'approx': 'Decimal approximation',
             'exact': 'Exact result'}
    for pod in root:
        if pod.tag == 'pod':
            if pod.attrib['title'] in ('Result', 'Value', title[output_type]):
                return pod.find('subpod').find('plaintext').text
    return 'WolframAlpha™ doesn\'t have the answer.'
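# Usage sketch for the WolframAlpha query above. A trailing ' | exact' or
# ' | approx' in the arguments picks which pod title is matched ('Exact result'
# vs. 'Decimal approximation'); it defaults to the approximation. The output
# below is illustrative and assumes a valid appid in wolframalpha-api-key:
#
#     >>> query('sqrt 2')            # 'Decimal approximation' pod
#     '1.4142135623730950488...'
#     >>> query('sqrt 2 | exact')    # 'Exact result' pod
#     'sqrt(2)'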
def xkcd_info(url, nick):
    """ Return the transcript and title of an xkcd page. """
    try:
        data = common.read_url(url)
    except HTTPError:
        return '{}: kunde inte ladda sidan: {}'.format(nick, url)
    title_re = re.compile(r'<title>xkcd: (.+?)</title>')
    titlebackup_re = re.compile(r'<div id="ctitle">(.+?)</div>')
    transcript_re = re.compile(r'<div id="transcript" .+?>'
                               r'(?P<transcript>.*?)(\{\{(?P<alt>.+?)\}\})?</div>',
                               re.DOTALL)
    # Transcript
    result = transcript_re.search(data)
    transcript = [line.strip() for line in result.group('transcript').splitlines()
                  if line.strip()]
    if not transcript:
        transcript = ['Ingen beskrivning än!']
    # Unused for now - also broken if no transcript is available
    # alttext = result.group('alt').strip()
    # Title
    title = title_re.search(data)
    if not title:
        title = titlebackup_re.search(data)
    firstline = '{} – {}'.format(title.group(1), url)
    return [common.truncate(common.unescape_html(x), 400)
            for x in [firstline] + transcript[:3]]
def lastfm(args):
    try:
        key = common.read_lineconf(common.read_file("lastfm-api-key"))[0]
        if not (len(key) == 32 and re.search(r'^[0-9a-f]+$', key)):
            raise IOError
    except (IOError, IndexError):
        raise IOError('Ingen nyckel för last.fm-API:et kunde hittas. Skapa filen lastfm-api-key med enbart nyckeln i sig i botens arbetskatalog.')
    try:
        content = common.read_url("http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&limit=1&api_key={0}&user={1}".format(key, common.quote(args)))
    except HTTPError:
        return "Kunde inte hitta en last.fm-användare med namnet {}.".format(args)
    dom = xml.dom.minidom.parseString(content)
    latesttrack = dom.getElementsByTagName('track')[0]
    artist = latesttrack.getElementsByTagName('artist')[0].childNodes[0].data
    title = latesttrack.getElementsByTagName('name')[0].childNodes[0].data
    if latesttrack.hasAttribute("nowplaying"):
        playstatus = "spelar just nu"
    else:
        playstatus = "spelade senast"
    return "{0} {1} {2} ({3}) -- History: http://www.last.fm/user/{0}/tracks".format(args, playstatus, title, artist)
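# Usage sketch for the last.fm lookup above. As the error message says, it
# needs a file called lastfm-api-key in the bot's working directory containing
# nothing but a 32-character hexadecimal API key. The user and track below are
# made up:
#
#     >>> lastfm('someUser')
#     'someUser spelar just nu <track> (<artist>) -- History: http://www.last.fm/user/someUser/tracks'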
def calc(args):
    content = common.read_url("http://www.google.com/ig/calculator?h=en&q=", args)
    lhs = sanitise(re.search(r'lhs: "(.*?)"', content).group(1))
    rhs = sanitise(re.search(r'rhs: "(.*?)"', content).group(1))
    err = sanitise(re.search(r'error: "(.*?)"', content).group(1))
    if err:
        return "Ogiltigt uttryck enligt Google! (Felkod: {0})".format(err)
    else:
        return "{0} = {1}".format(lhs, rhs)
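# For reference: the (since retired) Google calculator endpoint answered with a
# JavaScript-object-style body rather than strict JSON, which is why the
# function above pulls the fields out with regexes instead of a JSON parser.
# The shape was roughly:
#
#     {lhs: "1 + 1",rhs: "2",error: "",icc: false}
#
# A non-empty error field marks an invalid expression.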
def pp_search(args, url_re):
    """ Search google for a Profound Programmer page matching the args. Return the url. """
    searchterms = '{} site:theprofoundprogrammer.com/post/'.format(args)
    hits = common.read_json(common.read_url(
        "http://ajax.googleapis.com/ajax/services/search/web?v=1.0&safe=off&q=",
        searchterms))['responseData']['results']
    if not hits:
        return None
    striphtml = lambda s: re.sub(r'<.+?>', '', re.sub(r' +', '', s))
    return striphtml(hits[0]['url'])
def search(args):
    hits = common.read_json(common.read_url(
        "http://ajax.googleapis.com/ajax/services/search/web?v=1.0&safe=off&q=",
        args))['responseData']['results']
    if hits:
        # Collapse whitespace runs to single spaces, then strip any html tags.
        striphtml = lambda s: re.sub(r'<.+?>', '', re.sub(r'( +|\n)+', ' ', s))
        url = striphtml(hits[0]['unescapedUrl'])
        title = striphtml(hits[0]['titleNoFormatting'])
        content = striphtml(hits[0]['content'])
        result = "{1}: {2} -- {0}".format(url, title, content)
    else:
        result = "No hits."
    return result
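# Usage sketch for the Google web search above (the AJAX Search API it calls
# has since been shut down). The output below is illustrative only:
#
#     >>> search('xkcd')
#     'xkcd: A webcomic of romance, sarcasm, math, and language. -- https://xkcd.com/'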
def haskell(args):
    data = common.read_url("http://tryhaskell.org/haskell.json?method=eval&expr=", args)
    jsondata = json.JSONDecoder(strict=False).decode(data)
    if 'result' in jsondata and jsondata['result'] and 'type' in jsondata:
        return '{result} :: {type}'.format(**jsondata)
    elif 'type' in jsondata:
        return jsondata['type']
    elif 'result' in jsondata:
        return jsondata['result']
    elif 'error' in jsondata:
        # Collapse runs of spaces in the error message; str.replace(' +', ' ')
        # would only match a literal " +", so use a regex.
        return re.sub(r' +', ' ', jsondata['error'])
    elif 'exception' in jsondata:
        return jsondata['exception']
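# Usage sketch for the tryhaskell.org evaluator above. The service replies with
# JSON containing some subset of result/type/error/exception; the example below
# is illustrative rather than recorded output:
#
#     >>> haskell('length "hello"')
#     '5 :: Int'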
def pp_info(url, nick):
    """ Return the transcript and link to the image of a Profound Programmer page. """
    try:
        data = common.read_url(url)
    except HTTPError:
        return '{}: kunde inte ladda sidan: {}'.format(nick, url)
    main_re = re.compile(r"""
        <li\ class="post\ photo">
        \s*
        <img\ src="(?P<img>.+?)"
        .+?
        <div\ class="caption"><p>
        \[(?P<transcript>.+?)\]
        </p>
        \s*
        (<p><a\ href="(?P<hdimg>.+?)">\[HD\ Version\]</a>)?
        """, re.DOTALL | re.VERBOSE)
    transcript_re = re.compile(r'text\s?:? (“(?P<title1>.+?)”|‘(?P<title2>.+?)’)?'
                               r'([,;] )?(?P<transcript>.+)', re.DOTALL)
    result = main_re.search(data)
    if not result:
        print(url)
        raise AttributeError('.profound could not match the regex! '
                             'Has theprofoundprogrammer.com changed format?')
    rawtranscript = transcript_re.match(common.unescape_html(sanitize(result.group('transcript'))))
    title = None
    if rawtranscript:
        for t in ('title1', 'title2'):
            if rawtranscript.group(t):
                title = rawtranscript.group(t)
        transcript = rawtranscript.group('transcript')
    else:
        transcript = common.unescape_html(result.group('transcript'))
    if result.group('hdimg'):
        image = result.group('hdimg')
    else:
        image = result.group('img')
    out = ['[{}]'.format(transcript), image]
    if title:
        out = ['"{}"'.format(title)] + out
    return [common.truncate(x, 400) for x in out]
def isitdown(args):
    if not args.strip():
        return "skriv in en url för att se om den är nere bara för dej"
    content = common.read_url("http://www.downforeveryoneorjustme.com/", common.quote(args))
    regex = re.search(r'<div id="container">\s*(.+?)\s*<p><a href="/">', content, re.DOTALL)
    # This will – and is supposed to – crash if the regex fails.
    text = regex.group(1).lower()
    if text.startswith("it's not just you"):
        return "sidan verkar vara nere!"
    elif text.startswith("it's just you"):
        return "sidan verkar inte vara nere"
    elif text.startswith('huh?'):
        return "du verkar inte ha skrivit in en url"
    else:
        raise AttributeError('error in .down plugin, major parse error')