def get_rate(code):
    """Return ``(rate, name)`` for converting one unit of *code* into CAD.

    CAD itself short-circuits to 1; BTC is priced via the bitcoinaverage
    API; every other code is pulled from the Bank of Canada RSS feed at
    ``base_url``. Returns ``(False, False)`` when the feed 404s (unknown
    currency code).
    """
    if code == 'CAD':
        return 1, 'Dolar canadience'
    elif code == 'BTC':
        rates = json.loads(
            web.get('https://api.bitcoinaverage.com/ticker/all'))
        # Invert: the ticker gives CAD per BTC, we want BTC -> CAD factor.
        return 1 / rates['CAD']['24h_avg'], 'Bitcoin 24h'

    data, headers = web.get(base_url.format(code), dont_decode=True,
                            return_headers=True)
    if headers['_http_status'] == 404:
        return False, False
    xml = etree.fromstring(data)
    # Display names come from a local ISO 4217 table rather than the feed
    # title. Fix: the file was previously opened without being closed
    # (open(...).read()); use a context manager so the handle is released.
    with open("UnivBot/modules/json/iso4217.json") as iso_file:
        metajson = json.load(iso_file)
    name = metajson[code]
    rate = xml.find(
        '{http://purl.org/rss/1.0/}item/'
        '{http://www.cbwiki.net/wiki/index.php/Specification_1.1}statistics/'
        '{http://www.cbwiki.net/wiki/index.php/Specification_1.1}exchangeRate/'
        '{http://www.cbwiki.net/wiki/index.php/Specification_1.1}value').text
    return float(rate), name
def show_bug(bot, trigger, match=None):
    """Show information about a Bugzilla bug in the channel."""
    match = match or trigger
    domain = match.group(1)
    configured = bot.config.has_section('bugzilla')
    if not configured or domain not in bot.config.bugzilla.get_list('domains'):
        return
    url = 'https://%s%sctype=xml&%s' % match.groups()
    response = web.get(url, dont_decode=True)
    bug = etree.fromstring(response).find('bug')
    # Append the resolution to the status when one is present.
    status = bug.find('bug_status').text
    resolution = bug.find('resolution')
    if resolution is not None and resolution.text:
        status = status + ' ' + resolution.text
    importance = bug.find('priority').text + ' ' + bug.find('bug_severity').text
    template = ('[BUGZILLA] %s | Producto: %s | Componente: %s | Versión: %s | '
                'Importancia: %s | Estado: %s | Asignado a: %s | '
                'Reportado: %s | Modificado: %s')
    bot.say(template % (
        bug.find('short_desc').text,
        bug.find('product').text,
        bug.find('component').text,
        bug.find('version').text,
        importance,
        status,
        bug.find('assigned_to').text,
        bug.find('creation_ts').text,
        bug.find('delta_ts').text))
def duck_search(query):
    """Return the first DuckDuckGo result URL for *query*, or None.

    '!' characters are stripped so the query cannot trigger a bang
    redirect. Returns None implicitly when no result link is found.
    """
    query = query.replace('!', '')
    uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query
    # Fix: local was previously named 'bytes', shadowing the builtin.
    html = web.get(uri)
    if 'web-result"' in html:
        # Filter out the ads on top of the page.
        html = html.split('web-result"')[1]
    m = r_duck.search(html)
    if m:
        return web.decode(m.group(1))
def add_traceback(bot, trigger):
    """Add a traceback to a GitHub issue.

    This pulls the traceback from the exceptions log file. To use, put
    .addtrace followed by the issue number to add the comment to, then the
    signature of the error (the message shown to the channel when the error
    occured). This command will only work for errors from unhandled
    exceptions."""
    # Make sure the API is set up
    gitAPI = checkConfig(bot)
    if not gitAPI:
        return bot.say('Git module not configured, make sure github.oauth_token and github.repo are defined')
    # Make sure the input is valid: exactly "<issue-number> <signature>".
    args = trigger.group(2).split(None, 1)
    if len(args) != 2:
        bot.say('Please give both the issue number and the error message.')
        return
    number, trace = args
    # Make sure the given issue number exists
    issue_data = web.get('https://api.github.com/repos/%s/issues/%s' % (gitAPI[1], number))
    issue_data = json.loads(issue_data)
    if 'message' in issue_data and issue_data['message'] == 'Not Found':
        return bot.say("That issue doesn't exist.")
    # Find the relevant lines from the log file: copy everything from the
    # matching "Signature:" line up to the next dashed separator.
    post = ''
    logfile = os.path.join(bot.config.logdir, 'exceptions.log')
    with open(logfile) as log:
        in_trace = False
        for data in log:
            if data == 'Signature: ' + trace + '\n':
                post = data
                in_trace = True
            elif data == '----------------------------------------\n':
                in_trace = False
            elif in_trace:
                post += data
    # Give an error if we didn't find the traceback
    if not post:
        return bot.say("I don't remember getting that error. Please post it "
                       "yourself at https://github.com/%s/issues/%s" % (gitAPI[1], number))
    # Make the comment; gitAPI is (oauth_token, "owner/repo").
    try:
        raw = web.post('https://api.github.com/repos/' + gitAPI[1] + '/issues/' + number + '/comments?access_token=' + gitAPI[0],
                       json.dumps({'body': '``\n' + post + '``'}))
    except OSError:  # HTTPError:
        bot.say('The GitHub API returned an error.')
        return NOLIMIT
    data = json.loads(raw)
    bot.say('Added traceback to issue #%s. %s' % (number, data['html_url']))
    bot.debug(__file__, 'Traceback added to #%s in %s.'
              % (number, trigger.sender), 'warning')
def get_info(number=None):
    """Fetch xkcd comic metadata as a dict.

    With *number* given, fetches that comic; otherwise the latest. Adds a
    'url' key pointing at the comic's page.
    """
    if number:
        endpoint = 'http://xkcd.com/{}/info.0.json'.format(number)
    else:
        endpoint = 'http://xkcd.com/info.0.json'
    info = json.loads(web.get(endpoint))
    info['url'] = 'http://xkcd.com/' + str(info['num'])
    return info
def duck_api(query):
    """Return DuckDuckGo's instant-answer redirect for *query*, or None."""
    if '!bang' in query.lower():
        return 'https://duckduckgo.com/bang.html'
    uri = 'http://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query
    payload = json.loads(web.get(uri))
    # An empty 'Redirect' means no instant answer was found.
    return payload['Redirect'] or None
def suggest(bot, trigger):
    """Autocomplete a given phrase via the suggest.pl service."""
    phrase = trigger.group(2)
    if not phrase:
        return bot.reply("No hay nada por buscar.")
    base = 'http://websitedev.de/temp-bin/suggest.pl?q='
    # '+' must be escaped so it is not decoded as a space server-side.
    result = web.get(base + phrase.replace('+', '%2B'))
    result = result.replace('Perhaps', 'Resultados para')
    if result:
        bot.say(result)
    else:
        bot.reply('Lo siento, no encontré nada.')
def py(bot, trigger):
    """Evaluate a Python expression via the tumbolia service."""
    expression = trigger.group(2)
    if not expression:
        return bot.say("Se necesita una expresión para evaluar")
    result = web.get('http://tumbolia.appspot.com/py/' + web.quote(expression))
    if result:
        bot.say(result)
    else:
        bot.reply('Sin resultado correcto.')
def woeid_search(query):
    """Find the first Where On Earth ID result for the given query.

    Returns the etree node for the first result, so location data can
    still be retrieved from it; None when there is no result or the
    result node is empty.
    """
    yql = 'q=select * from geo.placefinder where text="%s"' % query
    raw = web.get('http://query.yahooapis.com/v1/public/yql?' + yql,
                  dont_decode=True)
    tree = etree.fromstring(raw)
    hit = tree.find('results/Result')
    if hit is None or len(hit) == 0:
        return None
    return hit
def mw_search(server, query, num):
    """Search a MediaWiki server for *query* and return result titles.

    Returns up to *num* page titles, or None when the API response has
    no 'query' section.
    """
    endpoint = ('http://%s/w/api.php?format=json&action=query'
                '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'
                '&srsearch=') % (server, num)
    response = json.loads(web.get(endpoint + query))
    if 'query' not in response:
        return None
    return [hit['title'] for hit in response['query']['search']]
def findIssue(bot, trigger):
    """Search for a GitHub issue by keyword or ID.

    usage: .findissue search keywords/ID (optional)
    You can specify the first keyword as "CLOSED" to search closed issues."""
    if not trigger.group(2):
        return bot.reply('What are you searching for?')
    # Is the Oauth token and repo available?
    gitAPI = checkConfig(bot)
    if not gitAPI:
        return bot.say('Git module not configured, make sure github.oauth_token and github.repo are defined')
    firstParam = trigger.group(2).split(' ')[0]
    if firstParam.isdigit():
        # Numeric argument: look the issue up directly by number.
        URL = 'https://api.github.com/repos/%s/issues/%s' % (gitAPI[1], firstParam)
    elif firstParam == 'CLOSED':
        # Remaining words (URL-encoded) are the search terms; reject
        # empty or formatting-only (\x02 bold / \x03 color) queries.
        if '%20'.join(trigger.group(2).split(' ')[1:]) not in ('', '\x02', '\x03'):
            URL = 'https://api.github.com/legacy/issues/search/' + gitAPI[1] + '/closed/' + '%20'.join(trigger.group(2).split(' ')[1:])
        else:
            return bot.reply('What are you searching for?')
    else:
        URL = 'https://api.github.com/legacy/issues/search/%s/open/%s' % (gitAPI[1], web.quote(trigger.group(2)))
    try:
        raw = web.get(URL)
    except HTTPError:
        bot.say('The GitHub API returned an error.')
        return NOLIMIT
    try:
        if firstParam.isdigit():
            data = json.loads(raw)
        else:
            # Legacy search returns a list; take the last match.
            data = json.loads(raw)['issues'][-1]
    except (KeyError, IndexError):
        return bot.say('No search results.')
    # Show only the first line of the body, with an ellipsis if truncated.
    try:
        if len(data['body'].split('\n')) > 1:
            body = data['body'].split('\n')[0] + '...'
        else:
            body = data['body'].split('\n')[0]
    except (KeyError):
        bot.debug(
            'GitHub KeyErr',
            ('API returned an invalid result on query request ' +
             trigger.group(2)),
            'always')
        bot.say('Invalid result, please try again later.')
        return NOLIMIT
    bot.reply('[#%s]\x02title:\x02 %s \x02|\x02 %s' % (data['number'], data['title'], body))
    bot.say(data['html_url'])
def gettld(bot, trigger):
    """Show information about the given Top Level Domain."""
    # NOTE(review): 'uri' is a module-level constant not visible here —
    # presumably the TLD listing page these regexes scrape; confirm at
    # the top of the module.
    page = web.get(uri)
    # First attempt: generic TLD table row layout.
    search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
    search = search.format(trigger.group(2))
    re_country = re.compile(search)
    matches = re_country.findall(page)
    if not matches:
        # Fallback: alternate row layout used for some TLD entries.
        search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
        search = search.format(trigger.group(2))
        re_country = re.compile(search)
        matches = re_country.findall(page)
    if matches:
        matches = list(matches[0])
        i = 0
        # Strip residual HTML tags from each captured cell.
        while i < len(matches):
            matches[i] = r_tag.sub("", matches[i])
            i += 1
        desc = matches[2]
        if len(desc) > 400:
            desc = desc[:400] + "..."
        reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (matches[1], desc, matches[3], matches[4])
        bot.reply(reply)
    else:
        # Last resort: country-code TLD rows, which carry a flag icon and
        # extra columns (explanation, notes, IDN, DNSSEC, SLD).
        search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
        search = search.format(unicode(trigger.group(2)))
        re_country = re.compile(search)
        matches = re_country.findall(page)
        if matches:
            matches = matches[0]
            dict_val = dict()
            dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val[
                "idn"], dict_val["dnssec"], dict_val["sld"] = matches
            for key in dict_val:
                # A lone space cell means the column is not applicable.
                if dict_val[key] == " ":
                    dict_val[key] = "N/A"
                dict_val[key] = r_tag.sub('', dict_val[key])
            if len(dict_val["notes"]) > 400:
                dict_val["notes"] = dict_val["notes"][:400] + "..."
            reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (
                dict_val["country"], dict_val["expl"], dict_val["notes"],
                dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
        else:
            reply = "No matches found for TLD: {0}".format(
                unicode(trigger.group(2)))
        bot.reply(reply)
def mw_snippet(server, query):
    """Retrieve a ~300-character plain-text intro snippet for a wiki page."""
    url = ('https://' + server + '/w/api.php?format=json'
           '&action=query&prop=extracts&exintro&explaintext'
           '&exchars=300&redirects&titles=') + query
    pages = json.loads(web.get(url))['query']['pages']
    # For some reason, the API keys pages by page *number*, so we just
    # grab the first page number in the results.
    first_key = list(pages.keys())[0]
    return pages[first_key]['extract']
def issue_info(bot, trigger, match=None):
    """Announce the title and first body line of a linked GitHub issue."""
    match = match or trigger
    api_url = 'https://api.github.com/repos/%s/issues/%s' % (match.group(1), match.group(2))
    try:
        raw = web.get(api_url)
    except HTTPError:
        bot.say('The GitHub API returned an error.')
        return NOLIMIT
    data = json.loads(raw)
    try:
        # Show only the first body line, with an ellipsis if truncated.
        lines = data['body'].split('\n')
        body = lines[0] + '...' if len(lines) > 1 else lines[0]
    except KeyError:
        bot.say('The API says this is an invalid issue. Please report this if you know it\'s a correct link!')
        return NOLIMIT
    bot.say('[#%s]\x02title:\x02 %s \x02|\x02 %s' % (data['number'], data['title'], body))
def translate(text, in_lang='auto', out_lang='en'):
    """Translate *text* via Google Translate's unofficial 't' endpoint.

    Returns (translation, detected_language). A target language ending
    in '-raw' returns the stringified parsed response tagged 'en-raw'
    instead.
    """
    raw = False
    if unicode(out_lang).endswith('-raw'):
        out_lang = out_lang[:-4]
        raw = True
    # A browser-like User-Agent; presumably Google rejects requests
    # without one — confirm before changing.
    headers = {
        'User-Agent': 'Mozilla/5.0' +
        '(X11; U; Linux i686)' +
        'Gecko/20071127 Firefox/2.0.0.11'
    }
    url_query = {
        "client": "t",
        "sl": in_lang,
        "tl": out_lang,
        "q": text,
    }
    query_string = "&".join(
        "{key}={value}".format(key=key, value=value)
        for key, value in url_query.items()
    )
    url = "http://translate.google.com/translate_a/t?{query}".format(query=query_string)
    result = web.get(url, timeout=40, headers=headers)
    # The endpoint returns pseudo-JSON with empty array slots (',,' and
    # '[,'); pad them with nulls so json.loads accepts the payload.
    while ',,' in result:
        result = result.replace(',,', ',null,')
    result = result.replace('[,', '[null,')
    data = json.loads(result)
    if raw:
        return str(data), 'en-raw'
    try:
        language = data[2]  # -2][0][0]
    except:
        language = '?'
    # data[0] is a list of (translated_chunk, ...) pairs; join the chunks.
    return ''.join(x[0] for x in data[0]), language
def isup(bot, trigger):
    """isup.me website status checker.

    Normalizes the argument to an http:// URL, rejects non-http(s)
    schemes, then reports whether the site responds from here.
    """
    site = trigger.group(2)
    if not site:
        return bot.reply("¿Que sitio web debo verificar?")
    # Fix: the old check compared site[:6] to the 7-char 'http://' and
    # site[:7] to the 8-char 'https://', so it could never match and
    # http(s) URLs were mishandled. startswith with a tuple is correct.
    if not site.startswith(('http://', 'https://')):
        if '://' in site:
            protocol = site.split('://')[0] + '://'
            return bot.reply("Intenta de nuevo sin el %s" % protocol)
        else:
            site = 'http://' + site
    try:
        response = web.get(site)
    except Exception:
        bot.say(site + ' parece caído desde aquí.')
        return
    if response:
        bot.say(site + ' está vivo desde aquí.')
    else:
        bot.say(site + ' parece caído desde aquí.')
def wa(bot, trigger):
    """Wolfram Alpha calculator.

    Queries the tumbolia proxy service and says the first
    "input = result" pair; falls back to a wolframalpha.com link when
    the answer is not in an IRC-able format.
    """
    if not trigger.group(2):
        return bot.reply("No search term.")
    query = trigger.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        # 45-second timeout; '+' is spelled out so the proxy does not
        # decode it as a space.
        answer = web.get(uri + web.quote(query.replace('+', 'plus')), 45,
                         dont_decode=True)
    except timeout as e:
        return bot.say('[WOLFRAM ERROR] Request timed out')
    if answer:
        answer = answer.decode('unicode_escape')
        # Python 2 HTMLParser; unescapes HTML entities in the response.
        answer = HTMLParser.HTMLParser().unescape(answer)
        # This might not work if there are more than one instance of escaped
        # unicode chars But so far I haven't seen any examples of such output
        # examples from Wolfram Alpha
        match = re.search('\\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\:' + char_code, char)
        # The proxy separates "input;result" with a semicolon.
        waOutputArray = answer.split(";")
        if(len(waOutputArray) < 2):
            if(answer.strip() == "Couldn't grab results from json stringified precioussss."):
                # Answer isn't given in an IRC-able format, just link to it.
                bot.say('[WOLFRAM]Couldn\'t display answer, try http://www.wolframalpha.com/input/?i=' + query.replace(' ', '+'))
            else:
                bot.say('[WOLFRAM ERROR]' + answer)
        else:
            bot.say('[WOLFRAM] ' + waOutputArray[0] + " = " + waOutputArray[1])
        waOutputArray = []
    else:
        bot.reply('Sorry, no result.')
def movie(bot, trigger):
    """Report a film's title, year, genre and IMDB rating."""
    if not trigger.group(2):
        return
    title = trigger.group(2).rstrip()
    raw = web.get("http://www.imdbapi.com/?t=" + title, 30)
    # The API returns a dict containing all the information we need.
    data = json.loads(raw)
    if data['Response'] == 'False':
        if 'Error' in data:
            message = '[PELICULA] %s' % data['Error']
        else:
            bot.debug(__file__, 'Se obtuvo un error de la API: %s' % title,
                      'warning')
            bot.debug(__file__, str(data), 'warning')
            message = '[PELICULA] Hubo un error en la Api de IMDB'
    else:
        message = ('[PELICULA] Título: ' + data['Title'] +
                   ' | Año: ' + data['Year'] +
                   ' | Rating: ' + data['imdbRating'] +
                   ' | Genero: ' + data['Genre'] +
                   ' | Enlace a IMDB: http://imdb.com/title/' + data['imdbID'])
    bot.say(message)
def bing_search(query, lang='es-ES'):
    """Return the first Bing result URL for *query*, or None.

    *lang* selects the Bing market (mkt parameter).
    """
    base = 'http://www.bing.com/search?mkt=%s&q=' % lang
    # Fix: local was previously named 'bytes', shadowing the builtin.
    page = web.get(base + query)
    m = r_bing.search(page)
    if m:
        return m.group(1)
def google_ajax(query):
    """Search using AjaxSearch, and return its JSON."""
    uri = ('http://ajax.googleapis.com/ajax/services/search/web'
           '?v=1.0&safe=off&q=' + query)
    return json.loads(web.get(uri))