def get_rate(code):
    """Return (rate, display name) for currency *code*, in units per CAD.

    Returns (False, False) when the Bank of Canada feed 404s (unknown code).
    """
    code = code.upper()
    if code == 'CAD':
        return 1, 'Canadian Dollar'
    elif code == 'BTC':
        rates = json.loads(
            web.get('https://api.bitcoinaverage.com/ticker/all'))
        return 1 / rates['CAD']['24h_avg'], 'Bitcoin—24hr average'

    data, headers = web.get(base_url.format(code), dont_decode=True,
                            return_headers=True)
    if headers['_http_status'] == 404:
        return False, False
    # Map the feed's XML namespaces onto short prefixes for xmltodict.
    namespaces = {
        'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb',
        'http://purl.org/rss/1.0/': None,
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf'
    }
    xml = xmltodict.parse(data, process_namespaces=True,
                          namespaces=namespaces).get('rdf:RDF')
    # Channel title looks like "Bank of Canada noon rate: XXX (noon)";
    # strip the fixed prefix and the "(noon)" suffix to get the name.
    namestring = xml.get('channel').get('title').get('#text')
    name = namestring[len('Bank of Canada noon rate: '):]
    name = re.sub(r'\s*\(noon\)\s*', '', name)
    rate = xml.get('item').get('cb:statistics').get('cb:exchangeRate').get(
        'cb:value').get('#text')
    return float(rate), name
def github_repo(bot, trigger, match=None):
    """Look up a GitHub repository (or the special keywords ``version``,
    ``status`` and ``rate-limit``) and report the result in channel."""
    match = match or trigger
    repo = match.group(2) or match.group(1)
    if repo.lower() == 'version':
        return bot.say(
            '[idlerpg] Version {} by {}, report issues at {}'.format(
                github.__version__, github.__author__, github.__repo__))
    if repo.lower() == 'status':
        current = json.loads(
            web.get('https://status.github.com/api/status.json'))
        lastcomm = json.loads(
            web.get('https://status.github.com/api/last-message.json'))
        # Translate API status keywords into IRC colour-coded labels
        # (\x02 = bold, \x03N = colour N).
        status = current['status']
        if status == 'major':
            status = "\x02\x034Broken\x03\x02"
        elif status == 'minor':
            status = "\x02\x037Shakey\x03\x02"
        elif status == 'good':
            status = "\x02\x033Online\x03\x02"
        lstatus = lastcomm['status']
        if lstatus == 'major':
            lstatus = "\x02\x034Broken\x03\x02"
        elif lstatus == 'minor':
            lstatus = "\x02\x037Shakey\x03\x02"
        elif lstatus == 'good':
            lstatus = "\x02\x033Online\x03\x02"
        # Render the "last message" timestamp in the caller's timezone.
        timezone = get_timezone(bot.db, bot.config, None, trigger.nick)
        if not timezone:
            timezone = 'UTC'
        lastcomm['created_on'] = format_time(bot.db, bot.config, timezone,
                                            trigger.nick, trigger.sender,
                                            from_utc(lastcomm['created_on']))
        return bot.say('[Github] Current Status: ' + status +
                       ' | Last Message: ' + lstatus + ': ' +
                       lastcomm['body'] + ' (' + lastcomm['created_on'] + ')')
    elif repo.lower() == 'rate-limit':
        return bot.say(
            fetch_api_endpoint(bot, 'https://api.github.com/rate_limit'))
    # Bare repo name: assume it belongs to the requesting nick.
    if '/' not in repo:
        repo = trigger.nick.strip() + '/' + repo
    URL = 'https://api.github.com/repos/%s' % (repo.strip())
    fmt_response(bot, trigger, URL)
def show_bug(bot, trigger, match=None):
    """Show information about a Bugzilla bug."""
    match = match or trigger
    domain = match.group(1)
    if domain not in bot.config.bugzilla.domains:
        return
    url = 'https://%s%sctype=xml&%s' % match.groups()
    data = web.get(url, dont_decode=True)
    bug = xmltodict.parse(data).get('bugzilla').get('bug')
    error = bug.get('@error', None)  # error="NotPermitted"
    if error:
        # Restricted/invalid bug: log and bail out quietly.
        LOGGER.warning('Bugzilla error: %s', error)
        return
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +
               'Importance: %s | Status: %s | Assigned to: %s | ' +
               'Reported: %s | Modified: %s')
    resolution = bug.get('resolution')
    if resolution is not None:
        status = bug.get('bug_status') + ' ' + resolution
    else:
        status = bug.get('bug_status')
    # assigned_to may be a plain string or a dict carrying a @name attribute.
    assigned_to = bug.get('assigned_to')
    if isinstance(assigned_to, dict):
        assigned_to = assigned_to.get('@name')
    message = message % (bug.get('short_desc'), bug.get('product'),
                         bug.get('component'), bug.get('version'),
                         (bug.get('priority') + ' ' + bug.get('bug_severity')),
                         status, assigned_to, bug.get('creation_ts'),
                         bug.get('delta_ts'))
    bot.say(message)
def api_bmark(bot, trigger, found_match=None, extra=None):
    """Bookmark *url* (with its page title and content) via the bookmark API.

    *extra*, when given, supplies ``#tags`` plus an optional free-text note.
    Returns ``[title, hostname]`` followed by the API call's result items.
    """
    url = found_match or trigger
    # Renamed the local from 'bytes' to avoid shadowing the builtin.
    page = web.get(url)
    # XXX: needs a patch to the URL module
    title = find_title(content=page)
    if title is None:
        title = '[untitled]'
    data = {
        u'url': url,
        u'is_private': int(api_private),
        u'description': title.encode('utf-8'),
        u'content': page
    }
    if extra is not None:
        # extract #tags, uniquely
        # copied from http://stackoverflow.com/a/6331688/1174784
        tags = {tag.strip("#") for tag in extra.split() if tag.startswith("#")}
        if tags:
            data['tags'] = ' '.join(tags)
        # strip tags from message and see what's left
        message = re.sub(r'#\w+', '', extra).strip()
        if message != '':
            # something more than hashtags was provided
            data['extended'] = extra
    return [title, get_hostname(url)] + list(api(bot, trigger, 'bmark', data))
def weather(bot, trigger):
    """.weather location - Show the weather at the given location."""
    location = trigger.group(2)
    woeid = ''
    if not location:
        # No argument: fall back to the caller's stored WOEID.
        woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
        if not woeid:
            return bot.msg(
                trigger.sender,
                "I don't know where you live. " +
                'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.'
            )
    else:
        location = location.strip()
        # The argument may be another nick with a stored location;
        # otherwise resolve it with a WOEID search.
        woeid = bot.db.get_nick_value(location, 'woeid')
        if woeid is None:
            first_result = woeid_search(location)
            if first_result is not None:
                woeid = first_result.get('woeid')
    if not woeid:
        return bot.reply("I don't know where that is.")
    query = web.urlencode({'w': woeid, 'u': 'c'})  # u=c -> metric units
    raw = web.get('http://weather.yahooapis.com/forecastrss?' + query,
                  dont_decode=True)
    parsed = xmltodict.parse(raw).get('rss')
    location = parsed.get('channel').get('title')
    cover = get_cover(parsed)
    temp = get_temp(parsed)
    humidity = get_humidity(parsed)
    wind = get_wind(parsed)
    bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))
def find_title(url):
    """Return the title for the given URL, or None when none can be found."""
    try:
        content, headers = web.get(url, return_headers=True,
                                   limit_bytes=max_bytes)
    except UnicodeDecodeError:
        return  # Fail silently when data can't be decoded
    # Some cleanup that I don't really grok, but was in the original, so
    # we'll keep it (with the compiled regexes made global) for now.
    content = title_tag_data.sub(r'<\1title>', content)
    content = quoted_title.sub('', content)
    start = content.find('<title>')
    end = content.find('</title>')
    if start == -1 or end == -1:
        return
    # +7 skips past '<title>' itself.
    title = web.decode(content[start + 7:end])
    title = title.strip()[:200]
    title = ' '.join(title.split())  # cleanly remove multiple spaces
    # More cryptic regex substitutions. This one looks to be myano's invention.
    title = re_dcc.sub('', title)
    return title or None
def woeid_search(query):
    """
    Find the first Where On Earth ID for the given query.

    Result is the etree node for the result, so that location data can still
    be retrieved. Returns None if there is no result, or the woeid field is
    empty.
    """
    query = 'q=select woeid from geo.places where text="%s"' % query
    body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,
                   dont_decode=True)
    parsed = xmltodict.parse(body).get('query')
    results = parsed.get('results')
    # xmltodict yields an OrderedDict for a single result and a list for
    # several; use isinstance (idiomatic) instead of 'type(x) is' checks.
    if not results:
        return None
    elif isinstance(results, collections.OrderedDict):
        place = results.get('place')
    elif isinstance(results, list):
        place = results[0].get('place')
    else:
        return None
    if not place:
        return None
    elif isinstance(place, collections.OrderedDict):
        return place
    elif isinstance(place, list):
        return place[0]
    else:
        return None
def uptime(bot, trigger):
    """
    Report the stream uptime.
    """
    try:
        query_url = 'https://api.twitch.tv/kraken/streams/{0}?api_version=3&client_id={1}'
        answer = web.get(query_url.format(trigger.sender[1:],
                                          bot.config.LRB.api_key))
    except Exception:
        # Narrowed from a bare 'except' so ^C etc. still propagate.
        return bot.reply("Couldn't contact the Twitch API servers. :( #BlameTwitch")
    try:
        data = json.loads(answer)
    except ValueError:
        # json.loads raises ValueError (JSONDecodeError) on bad payloads.
        return bot.reply("The Twitch API returned an invalid object. :( #BlameTwitch")
    if data['stream'] is not None:
        startTime = data['stream']['created_at']
    else:
        return bot.reply("Stream offline. :(")
    f = '%Y-%m-%dT%H:%M:%SZ'
    tStart = datetime.datetime.strptime(startTime, f)
    now = datetime.datetime.utcnow()
    # BUG FIX: timedelta.seconds ignores whole days, so streams online for
    # more than 24h reported the wrong uptime; total_seconds() counts them.
    uptime = int((now - tStart).total_seconds())
    h, r = divmod(uptime, 3600)
    m, s = divmod(r, 60)
    if h > 0:
        return bot.reply('Stream has been online for %s:%s:%s' % (h, m, s))
    else:
        return bot.reply('Stream has been online for %s:%s' % (m, s))
def weather(bot, trigger):
    """.weather location - Show the weather at the given location."""
    location = trigger.group(2)
    woeid = ''
    if not location:
        # No argument: fall back to the caller's stored WOEID.
        woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
        if not woeid:
            return bot.msg(trigger.sender, "I don't know where you live. " +
                           'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.')
    else:
        location = location.strip()
        # The argument may be another nick with a stored location;
        # otherwise resolve it with a WOEID search.
        woeid = bot.db.get_nick_value(location, 'woeid')
        if woeid is None:
            first_result = woeid_search(location)
            if first_result is not None:
                woeid = first_result.get('woeid')
    if not woeid:
        return bot.reply("I don't know where that is.")
    # YQL weather query; u='c' requests metric units.
    query = 'q=select * from weather.forecast where woeid="%s" and u=\'c\'' % woeid
    body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,
                   dont_decode=True)
    parsed = xmltodict.parse(body).get('query')
    results = parsed.get('results')
    if results is None:
        return bot.reply("No forecast available. Try a more specific location.")
    location = results.get('channel').get('title')
    cover = get_cover(results)
    temp = get_temp(results)
    humidity = get_humidity(results)
    wind = get_wind(results)
    bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))
def cancelled(bot, trigger):
    """Show current cancelled classes at MUN.

    Sends the detailed list to the requesting nick by private message.
    """
    page, headers = web.get(uri, return_headers=True)
    if headers['_http_status'] != 200:
        bot.say('Couldn\'t find cancellation information.')
        return
    parsed = html.fromstring(page)
    middle = parsed.get_element_by_id('middle')
    contents = list(middle)
    reply = []
    if trigger.nick != trigger.sender:
        # Invoked in a channel: the list may be long, so deliver via PM.
        bot.reply('I\'m messaging you with a detailed cancellation list!')
    for element in contents:
        # A long underscore bar marks the end of the cancellation list.
        if element.tag == 'p' and element.text_content() == '________________________________________':
            break
        elif element.tag == 'h2':
            # Removed dead assignment 'printed = True' (never read).
            text = element.text_content()
            day = parser.parse(text)
            if day.date() == datetime.today().date():
                reply.append('MUN\'s Cancellations for ' + bold(text) + ' (TODAY):')
            else:
                reply.append('MUN\'s Cancellations for ' + bold(text) + ': ')
        elif element.tag == 'p':
            text = element.text_content()
            # First child element holds the course name; bold it and keep
            # the rest of the paragraph text verbatim.
            course = list(element)[0].text_content()
            reply.append(bold(course) + text[len(course):])
    for a in reply:
        bot.msg(trigger.nick, a)
def ytsearch(bot, trigger):
    """
    .youtube <query> - Search YouTube
    """
    if not trigger.group(2):
        return
    uri = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&q=' + trigger.group(2)
    raw = web.get('{0}&key={1}'.format(uri, bot.config.google.public_key))
    # Robustness: a search can legitimately return zero items.
    items = json.loads(raw)['items']
    if not items:
        return
    vid = items[0]['id']['videoId']
    uri = 'https://www.googleapis.com/youtube/v3/videos?id=' + vid + '&part=contentDetails,snippet,statistics'
    video_info = ytget(bot, trigger, uri)
    if video_info is None:
        return
    title = video_info['snippet']['title']
    uploader = video_info['snippet']['channelTitle']
    duration = video_info['contentDetails']['duration']
    stats = video_info['statistics']
    views = stats['viewCount']
    # Channels can disable ratings, in which case likeCount/dislikeCount are
    # absent from the statistics object; default to '0' instead of KeyError.
    likes = stats.get('likeCount', '0')
    dislikes = stats.get('dislikeCount', '0')
    message = '[YT Search] {0} | https://youtu.be/{1} | Duration: {2} | Views: {3} | Uploader: {4} | {5} | {6}'.format(
        bold(title), video_info['id'], duration, views, uploader,
        color(likes, colors.GREEN), color(dislikes, colors.RED))
    bot.say(message)
def isup(bot, trigger):
    """isup.me website status checker"""
    site = trigger.group(2)
    if not site:
        return bot.reply("What site do you want to check?")
    # BUG FIX: the old checks sliced the wrong lengths (site[:6] vs the
    # 7-char 'http://', site[:7] vs the 8-char 'https://'), so they could
    # never match and every http(s) URL was mis-handled. startswith with a
    # tuple of schemes is both correct and idiomatic.
    if not site.startswith(('http://', 'https://')):
        if '://' in site:
            # Some other scheme (ftp://, irc://, ...): refuse it.
            protocol = site.split('://')[0] + '://'
            return bot.reply("Try it again without the %s" % protocol)
        else:
            site = 'http://' + site
    if '.' not in site:
        site += ".com"
    try:
        response = web.get(site)
    except Exception:
        bot.say(site + ' looks down from here.')
        return
    if response:
        bot.say(site + ' looks fine to me.')
    else:
        bot.say(site + ' is down from here.')
def mw_snippet(server, query, bot):
    """
    Retrieve a ~300-character plain-text intro snippet for *query* from
    Wikipedia (language chosen by bot.config.lang; defaults to English).

    NOTE(review): the *server* parameter is unused — confirm with callers.
    """
    # Deduplicated the three near-identical URL literals into one template.
    lang = bot.config.lang
    if lang not in ('fr', 'es'):
        lang = 'en'
    snippet_url = ('https://%s.wikipedia.org/w/api.php?format=json'
                   '&action=query&prop=extracts&exintro&explaintext'
                   '&exchars=300&redirects&titles=' % lang)
    if bot.config.lang == 'fr' or bot.config.lang == 'es':
        snippet_url += web.quote(query.encode('utf-8'))
    else:
        # NOTE(review): cp1252 looks wrong for a UTF-8 API — preserved to
        # avoid a behavior change, but confirm and switch to utf-8.
        snippet_url += web.quote(query.encode('cp1252'))
    snippet = json.loads(web.get(snippet_url))
    snippet = snippet['query']['pages']
    # For some reason, the API gives the page *number* as the key, so we just
    # grab the first page number in the results.
    # BUG FIX: dict.keys()[0] raises TypeError on Python 3 (views aren't
    # subscriptable); next(iter(...)) works on both 2 and 3.
    snippet = snippet[next(iter(snippet))]
    return snippet['extract']
def wikt(word):
    """Scrape the Wiktionary page for *word*.

    Returns (etymology, definitions): *etymology* is the etymology paragraph
    text (or None), and *definitions* maps a part of speech ('noun', 'verb',
    ...) to a list of definition strings.
    """
    page = web.get(uri % web.quote(word))
    page = r_ul.sub('', page)

    # Scan line by line, tracking which section heading we're inside.
    mode = None
    etymology = None
    definitions = {}
    for line in page.splitlines():
        if 'id="Etymology"' in line:
            mode = 'etymology'
        elif 'id="Noun"' in line:
            mode = 'noun'
        elif 'id="Verb"' in line:
            mode = 'verb'
        elif 'id="Adjective"' in line:
            mode = 'adjective'
        elif 'id="Adverb"' in line:
            mode = 'adverb'
        elif 'id="Interjection"' in line:
            mode = 'interjection'
        elif 'id="Particle"' in line:
            mode = 'particle'
        elif 'id="Preposition"' in line:
            mode = 'preposition'
        elif 'id="' in line:
            mode = None
        # BUG FIX: this compared against the misspelling 'etmyology', so the
        # etymology paragraph was never captured and was always None.
        elif (mode == 'etymology') and ('<p>' in line):
            etymology = text(line)
        elif (mode is not None) and ('<li>' in line):
            definitions.setdefault(mode, []).append(text(line))
        if '<hr' in line:
            break
    return etymology, definitions
def find_title(url=None, content=None):
    """Return the title for the given URL.

    Copy of find_title that allows for avoiding duplicate requests.
    Exactly one of *url* or *content* must be provided.
    """
    if (not content and not url) or (content and url):
        raise ValueError("url *or* content needs to be provided to find_title")
    if url:
        try:
            content, headers = web.get(url, return_headers=True,
                                       limit_bytes=max_bytes)
        except UnicodeDecodeError:
            return  # Fail silently when data can't be decoded
    # NOTE(review): assert is stripped under -O; consider an explicit check.
    assert content
    # Some cleanup that I don't really grok, but was in the original, so
    # we'll keep it (with the compiled regexes made global) for now.
    content = title_tag_data.sub(r"<\1title>", content)
    content = quoted_title.sub("", content)
    start = content.find("<title>")
    end = content.find("</title>")
    if start == -1 or end == -1:
        return
    # +7 skips past '<title>' itself.
    title = web.decode(content[start + 7 : end])
    title = title.strip()[:200]
    title = " ".join(title.split())  # cleanly remove multiple spaces
    # More cryptic regex substitutions. This one looks to be myano's invention.
    title = re_dcc.sub("", title)
    return title or None
def movie(bot, trigger):
    """
    Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
    """
    if not trigger.group(2):
        return
    word = trigger.group(2).rstrip()
    # FIX: the IMDb API moved from imdbapi.com (dead) to omdbapi.com — the
    # other movie lookups in this file already use the new host.
    uri = "http://www.omdbapi.com/?t=" + word
    u = web.get(uri, 30)
    data = json.loads(u)  # data is a Dict containing all the information we need
    if data['Response'] == 'False':
        if 'Error' in data:
            message = '[MOVIE] %s' % data['Error']
        else:
            LOGGER.warning(
                'Got an error from the imdb api, search phrase was %s; data was %s',
                word, str(data))
            message = '[MOVIE] Got an error from imdbapi'
    else:
        message = '[MOVIE] Title: ' + data['Title'] + \
                  ' | Year: ' + data['Year'] + \
                  ' | Rating: ' + data['imdbRating'] + \
                  ' | Genre: ' + data['Genre'] + \
                  ' | IMDB Link: http://imdb.com/title/' + data['imdbID']
    bot.say(message)
def bing_search(query, lang='en-GB'):
    """Return the URL of the first Bing web result for *query*, or None."""
    page = web.get('http://www.bing.com/search?mkt=%s&q=' % lang + query)
    hit = r_bing.search(page)
    if hit:
        return hit.group(1)
def show_bug(bot, trigger, match=None):
    """Show information about a Bugzilla bug."""
    match = match or trigger
    domain = match.group(1)
    if domain not in bot.config.bugzilla.domains:
        return
    url = 'https://%s%sctype=xml&%s' % match.groups()
    data = web.get(url, dont_decode=True)
    bug = etree.fromstring(data).find('bug')
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +
               'Importance: %s | Status: %s | Assigned to: %s | ' +
               'Reported: %s | Modified: %s')
    # Append the resolution (FIXED, WONTFIX, ...) to the status when present.
    resolution = bug.find('resolution')
    if resolution is not None and resolution.text:
        status = bug.find('bug_status').text + ' ' + resolution.text
    else:
        status = bug.find('bug_status').text
    message = message % (
        bug.find('short_desc').text, bug.find('product').text,
        bug.find('component').text, bug.find('version').text,
        (bug.find('priority').text + ' ' + bug.find('bug_severity').text),
        status, bug.find('assigned_to').text, bug.find('creation_ts').text,
        bug.find('delta_ts').text)
    bot.say(message)
def vimeo_by_url(bot, trigger, found_match=None):
    """Announce title/uploader/duration/view count for a matched Vimeo URL."""
    match = found_match or trigger
    videoID = match.group(2)
    apiURL = "https://vimeo.com/api/v2/video/" + videoID + ".json"
    try:
        resp = json.loads(web.get(apiURL))
    except Exception:
        # Narrowed from a bare 'except' (which also swallowed ^C/SystemExit);
        # any fetch/parse failure means we silently skip the announcement.
        return
    output = u"[Vimeo] "
    output += u"Title: %s" % (str(resp[0]['title']))
    if 'user_name' in resp[0]:
        output += u" | Uploader: %s" % (str(resp[0]['user_name']))
    if 'duration' in resp[0]:
        output += u" | Duration: %s" % (str(resp[0]['duration']))
    if 'stats_number_of_plays' in resp[0]:
        output += u" | Views : %s" % (str(resp[0]['stats_number_of_plays']))
    bot.say(output)
def get_def(word, num=0):
    """Look up *word* on Urban Dictionary and format definition number
    *num* (0-based) with its vote counts. Returns a message string."""
    url = UD_URL + word
    try:
        resp = json.loads(web.get(url))
    except UnicodeError:
        definition = ('ENGLISH M**********R, DO YOU SPEAK IT?')
        return definition
    nom = num + 1  # human-friendly 1-based definition number
    if resp['result_type'] == 'no_results':
        definition = 'Definition %s not found!' % (word)
    else:
        try:
            item = resp['list'][num]['definition'].encode('utf8')
            thumbsup = resp['list'][num]['thumbs_up']
            thumbsdown = resp['list'][num]['thumbs_down']
            points = str(int(thumbsup) - int(thumbsdown))
            total_nom = len(resp['list'])
            # \x03 IRC colour codes: 03 = green (up), 05 = maroon (down).
            definition = 'Definition: ' + str(item) + " >> Number: " + str(
                nom) + '/' + str(
                    total_nom) + ' >> Points: ' + points + ' (03' + str(
                        thumbsup) + '|05' + str(thumbsdown) + ')'
        except IndexError:
            # BUG FIX: the adjacent string literals were missing a space and
            # rendered as "doesnot exist".
            definition = ('Definition entry %s does '
                          'not exist for \'%s\'.' % (nom, word))
    return definition
def get_np_info(username):
    """Return the now-playing (or most recently scrobbled) Last.fm track for
    *username*, or None if the user does not exist.

    Result keys: track, album, artist, user_playcount, user_loved.
    """
    username = web.quote(username)
    api_key = "API_KEY"  # NOTE(review): placeholder — needs a real Last.fm key
    recent_tracks = web.get("http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&format=json&user=%s&api_key=%s" % (username, api_key))
    # now playing track, or most recently scrobbled track
    now_playing = json.loads(recent_tracks)
    # if the user does not exist
    if 'recenttracks' not in now_playing:
        return None
    now_playing = now_playing['recenttracks']['track'][0]
    track = now_playing['name']
    album = now_playing['album']['#text']
    artist = now_playing['artist']['#text']
    # HACK: Python 2-only trick — reload(sys) restores setdefaultencoding
    # (removed by site.py) to force utf8. This breaks on Python 3.
    import sys
    reload(sys)
    sys.setdefaultencoding('utf8')
    # Workaround: web.get() reportedly fails for this endpoint, so use urllib.
    track_page = urllib.urlopen("http://ws.audioscrobbler.com/2.0/?method=track.getInfo&format=json&artist=%s&track=%s&username=%s&api_key=%s" % (web.quote(artist), web.quote(track), username, api_key))
    # NOTE(review): json.loads' 'encoding' kwarg is ignored on Py3 and removed
    # in 3.9 — confirm the target interpreter.
    track_info = json.loads(track_page.read().decode(), encoding='utf-8')['track']
    user_playcount = "0"
    if 'userplaycount' in track_info:
        user_playcount = track_info['userplaycount']
    user_loved = False
    # NOTE(review): 'userloved' may be absent from the response — KeyError risk.
    if int(track_info['userloved']) > 0:
        user_loved = True
    return {"track": track, "album": album, "artist": artist,
            "user_playcount": user_playcount, "user_loved": user_loved}
def weather(bot, trigger):
    """.weather location - Show the weather at the given location."""
    location = trigger.group(2)
    woeid = ''
    if not location:
        # No argument: fall back to the caller's stored WOEID.
        woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
        if not woeid:
            return bot.msg(trigger.sender, "I don't know where you live. " +
                           'Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.')
    else:
        location = location.strip()
        # The argument may be another nick with a stored location;
        # otherwise resolve it with a WOEID search.
        woeid = bot.db.get_nick_value(location, 'woeid')
        if woeid is None:
            first_result = woeid_search(location)
            if first_result is not None:
                woeid = first_result.get('woeid')
    if not woeid:
        return bot.reply("I don't know where that is.")
    query = web.urlencode({'w': woeid, 'u': 'c'})  # u=c -> metric units
    raw = web.get('http://weather.yahooapis.com/forecastrss?' + query,
                  dont_decode=True)
    parsed = xmltodict.parse(raw).get('rss')
    location = parsed.get('channel').get('title')
    cover = get_cover(parsed)
    temp = get_temp(parsed)
    humidity = get_humidity(parsed)
    wind = get_wind(parsed)
    bot.say(u'%s: %s, %s, %s, %s' % (location, cover, temp, humidity, wind))
def etymology(word):
    """Fetch the etymology of *word* from etymonline.

    Returns the first sentence of the first entry, quoted and truncated to
    roughly 275 characters, followed by the source URL — or None when no
    entry (or no parseable sentence) is found.
    """
    # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc
    # entries? - http://swhack.com/logs/2006-07-19#T15-05-29
    if len(word) > 25:
        raise ValueError("Word too long: %s[...]" % word[:10])
    # etymonline files 'axe' under a combined page.
    word = {'axe': 'ax/axe'}.get(word, word)

    page = web.get(etyuri % word)
    entries = r_definition.findall(page)
    if not entries:
        return None

    first_entry = text(entries[0])
    sentence_match = r_sentence.match(first_entry)
    if sentence_match is None:
        return None
    sentence = sentence_match.group(0)

    # Truncate over-long sentences on a word boundary and mark the cut.
    maxlength = 275
    if len(sentence) > maxlength:
        clipped = sentence[:maxlength]
        pieces = clipped[:-5].split(' ')
        pieces.pop()
        sentence = ' '.join(pieces) + ' [...]'

    sentence = '"' + sentence.replace('"', "'") + '"'
    return sentence + ' - ' + (etyuri % word)
def show_bug(bot, trigger, match=None):
    """Show information about a Bugzilla bug."""
    match = match or trigger
    domain = match.group(1)
    if domain not in bot.config.bugzilla.domains:
        return
    url = 'https://%s%sctype=xml&%s' % match.groups()
    data = web.get(url, dont_decode=True)
    bug = xmltodict.parse(data).get('bugzilla').get('bug')
    # Consistency fix (matches the other xmltodict show_bug in this file):
    # restricted bugs come back with error="NotPermitted" and none of the
    # fields below, which previously crashed the string formatting.
    error = bug.get('@error', None)
    if error:
        LOGGER.warning('Bugzilla error: %s', error)
        return
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +
               'Importance: %s | Status: %s | Assigned to: %s | ' +
               'Reported: %s | Modified: %s')
    resolution = bug.get('resolution')
    if resolution is not None:
        status = bug.get('bug_status') + ' ' + resolution
    else:
        status = bug.get('bug_status')
    # Consistency fix: assigned_to may be a plain string instead of a dict,
    # in which case .get('@name') raised AttributeError.
    assigned_to = bug.get('assigned_to')
    if isinstance(assigned_to, dict):
        assigned_to = assigned_to.get('@name')
    message = message % (
        bug.get('short_desc'), bug.get('product'),
        bug.get('component'), bug.get('version'),
        (bug.get('priority') + ' ' + bug.get('bug_severity')),
        status, assigned_to,
        bug.get('creation_ts'), bug.get('delta_ts'))
    bot.say(message)
def movie(bot, trigger):
    """
    Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
    """
    if not trigger.group(2):
        return
    word = trigger.group(2).rstrip()
    uri = "http://www.omdbapi.com/?t=" + word
    # 30-second timeout on the API call.
    u = web.get(uri, 30)
    data = json.loads(
        u)  # data is a Dict containing all the information we need
    # OMDb signals failure with a literal string 'False' in 'Response'.
    if data['Response'] == 'False':
        if 'Error' in data:
            message = '[MOVIE] %s' % data['Error']
        else:
            LOGGER.warning(
                'Got an error from the OMDb api, search phrase was %s; data was %s',
                word, str(data))
            message = '[MOVIE] Got an error from OMDbapi'
    else:
        message = '[MOVIE] Title: ' + data['Title'] + \
                  ' | Year: ' + data['Year'] + \
                  ' | Rating: ' + data['imdbRating'] + \
                  ' | Genre: ' + data['Genre'] + \
                  ' | IMDB Link: http://imdb.com/title/' + data['imdbID']
    bot.say(message)
def short_cancelled(bot, trigger):
    """Display short list of cancelled courses at MUN (one channel message)."""
    page, headers = web.get(uri, return_headers=True)
    if headers['_http_status'] != 200:
        bot.say('Couldn\'t find cancellation information.')
        return
    parsed = html.fromstring(page)
    middle = parsed.get_element_by_id('middle')
    contents = list(middle)
    reply = ''
    for element in contents:
        # A long underscore bar marks the end of the cancellation list.
        if element.tag == 'p' and element.text_content() == '________________________________________':
            break
        elif element.tag == 'h2':
            # Removed dead assignment 'printed = True' (never read).
            text = element.text_content()
            day = parser.parse(text)
            if day.date() == datetime.today().date():
                reply += '| MUN\'s Cancellations for ' + bold(text) + ' (TODAY): '
            else:
                reply += '| MUN\'s Cancellations for ' + bold(text) + ': '
        elif element.tag == 'p':
            text = element.text_content()
            course = list(element)[0].text_content()
            reply += course + ', '
    # Slice off the leading '| ' and the trailing ', '.
    bot.say(reply[2:-2])
    bot.say('Use \'.canceldetail\' for more detailed information')
def get_np_info(username):
    """Return the now-playing (or most recently scrobbled) Last.fm track for
    *username*, or None if the user does not exist.

    Result keys: track, album, artist, user_playcount, user_loved.
    """
    username = web.quote(username)
    api_key = "782c02b1c96ae181d83850f050509103"
    recent_tracks = web.get("http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&format=json&user=%s&api_key=%s" % (username, api_key))
    # now playing track, or most recently scrobbled track
    now_playing = json.loads(recent_tracks)
    # if the user does not exist
    if 'recenttracks' not in now_playing:
        return None
    now_playing = now_playing['recenttracks']['track'][0]
    track = now_playing['name']
    album = now_playing['album']['#text']
    artist = now_playing['artist']['#text']
    # Workaround: web.get() reportedly fails for this endpoint, so use urllib.
    track_page = urllib.request.urlopen("http://ws.audioscrobbler.com/2.0/?method=track.getInfo&format=json&artist=%s&track=%s&username=%s&api_key=%s" % (web.quote(artist), web.quote(track), username, api_key))
    track_info = json.loads(track_page.read().decode())['track']
    user_playcount = track_info.get('userplaycount', "0")
    # BUG FIX: 'userloved' can be absent (e.g. when no username matched the
    # track lookup); the old subscript raised KeyError. Default to not-loved.
    user_loved = int(track_info.get('userloved', 0)) > 0
    return {"track": track, "album": album, "artist": artist,
            "user_playcount": user_playcount, "user_loved": user_loved}
def find_title(url=None, content=None):
    """Return the title for the given URL.

    Copy of find_title that allows for avoiding duplicate requests.
    Exactly one of *url* or *content* must be provided.
    """
    if (not content and not url) or (content and url):
        raise ValueError('url *or* content needs to be provided to find_title')
    if url:
        try:
            content, headers = web.get(url, return_headers=True,
                                       limit_bytes=max_bytes)
        except UnicodeDecodeError:
            return  # Fail silently when data can't be decoded
    # NOTE(review): assert is stripped under -O; consider an explicit check.
    assert content
    # Some cleanup that I don't really grok, but was in the original, so
    # we'll keep it (with the compiled regexes made global) for now.
    content = title_tag_data.sub(r'<\1title>', content)
    content = quoted_title.sub('', content)
    start = content.find('<title>')
    end = content.find('</title>')
    if start == -1 or end == -1:
        return
    # +7 skips past '<title>' itself.
    title = web.decode(content[start + 7:end])
    title = title.strip()[:200]
    title = ' '.join(title.split())  # cleanly remove multiple spaces
    # More cryptic regex substitutions. This one looks to be myano's invention.
    title = re_dcc.sub('', title)
    return title or None
def vimeo_by_url(bot, trigger, found_match=None):
    """Announce metadata (title, uploader, date, duration, stats) for a
    matched Vimeo URL."""
    match = found_match or trigger
    videoID = match.group(2)
    apiURL = "https://vimeo.com/api/v2/video/" + videoID + ".json"
    try:
        resp = json.loads(web.get(apiURL))
    except Exception:
        # Narrowed from a bare 'except' (which also swallowed ^C/SystemExit);
        # any fetch/parse failure means we silently skip the announcement.
        return
    output = u"[Vimeo] "
    output += u"Title: %s" % (str(resp[0]['title']))
    if 'user_name' in resp[0]:
        output += u" | Uploader: %s" % (str(resp[0]['user_name']))
    if 'upload_date' in resp[0]:
        output += u" | Uploaded: %s" % (str(resp[0]['upload_date']))
    if 'duration' in resp[0]:
        output += u" | Duration: %s" % (str(resp[0]['duration']))
    if 'stats_number_of_plays' in resp[0]:
        output += u" | Views : %s" % (str(resp[0]['stats_number_of_plays']))
    if 'stats_number_of_comments' in resp[0]:
        output += u" | Comments: %s" % (str(resp[0]['stats_number_of_comments']))
    if 'stats_number_of_likes' in resp[0]:
        output += u" | Likes: %s" % (str(resp[0]['stats_number_of_likes']))
    bot.say(output)
def weather(bot, trigger):
    """.weather location - Show the weather at the given location."""
    location = trigger.group(2)
    woeid = ""
    if not location:
        # No argument: fall back to the caller's stored WOEID.
        woeid = bot.db.get_nick_value(trigger.nick, "woeid")
        if not woeid:
            return bot.msg(
                trigger.sender,
                "I don't know where you live. "
                + "Give me a location, like .weather London, or tell me where you live by saying .setlocation London, for example.",
            )
    else:
        location = location.strip()
        # The argument may be another nick with a stored location;
        # otherwise resolve it with a WOEID search.
        woeid = bot.db.get_nick_value(location, "woeid")
        if woeid is None:
            first_result = woeid_search(location)
            if first_result is not None:
                woeid = first_result.get("woeid")
    if not woeid:
        return bot.reply("I don't know where that is.")
    # YQL weather query; u='c' requests metric units.
    query = "q=select * from weather.forecast where woeid=\"%s\" and u='c'" % woeid
    body = web.get("http://query.yahooapis.com/v1/public/yql?" + query, dont_decode=True)
    parsed = xmltodict.parse(body).get("query")
    results = parsed.get("results")
    # BUG FIX: YQL returns an empty result set for stale/unknown WOEIDs, so
    # 'results' can be None; the old code then raised AttributeError. The
    # sibling weather implementation in this file already guards this.
    if results is None:
        return bot.reply("No forecast available. Try a more specific location.")
    location = results.get("channel").get("title")
    cover = get_cover(results)
    temp = get_temp(results)
    humidity = get_humidity(results)
    wind = get_wind(results)
    bot.say("%s: %s, %s, %s, %s" % (location, cover, temp, humidity, wind))
def woeid_search(query):
    """
    Find the first Where On Earth ID for the given query.

    Result is the etree node for the result, so that location data can still
    be retrieved. Returns None if there is no result, or the woeid field is
    empty.
    """
    query = 'q=select woeid from geo.places where text="%s"' % query
    body = web.get('http://query.yahooapis.com/v1/public/yql?' + query,
                   dont_decode=True)
    parsed = xmltodict.parse(body).get('query')
    results = parsed.get('results')
    # BUG FIX: the old flow referenced an undefined name ('result' instead of
    # 'results') on the list branch (NameError), checked for empty results
    # *after* already indexing into them, and on the list branch returned
    # 'results.get(...)' which doesn't exist on a list. Rewritten to the
    # straightforward shape used by the working copy in this file.
    if not results:
        return None
    # xmltodict yields an OrderedDict for one result, a list for several.
    if isinstance(results, list):
        place = results[0].get('place')
    elif isinstance(results, collections.OrderedDict):
        place = results.get('place')
    else:
        return None
    if not place:
        return None
    # 'place' itself may also be a single node or a list of nodes.
    if isinstance(place, list):
        return place[0]
    if isinstance(place, collections.OrderedDict):
        return place
    return None
def movie(bot, trigger):
    """
    Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
    """
    if not trigger.group(2):
        return
    word = trigger.group(2).rstrip()
    uri = "http://www.omdbapi.com/?t=" + word
    # 30-second timeout on the API call.
    u = web.get(uri, 30)
    data = json.loads(u)  # data is a Dict containing all the information we need
    # OMDb signals failure with a literal string 'False' in 'Response'.
    if data["Response"] == "False":
        if "Error" in data:
            message = "[MOVIE] %s" % data["Error"]
        else:
            LOGGER.warning("Got an error from the OMDb api, search phrase was %s; data was %s", word, str(data))
            message = "[MOVIE] Got an error from OMDbapi"
    else:
        message = (
            "[MOVIE] Title: " + data["Title"]
            + " | Year: " + data["Year"]
            + " | Rating: " + data["imdbRating"]
            + " | Genre: " + data["Genre"]
            + " | IMDB Link: http://imdb.com/title/" + data["imdbID"]
        )
    bot.say(message)
def bing_search(query, lang="en-GB"):
    """Return the URL of the first Bing web result for *query*, or None."""
    base = "http://www.bing.com/search?mkt=%s&q=" % lang
    bytes = web.get(base + query)
    m = r_bing.search(bytes)
    if m:
        return m.group(1)
def get_info(url=None):
    """Fetch DeviantArt oEmbed metadata for *url* as a dict.

    Returns None when no URL is supplied.
    """
    if not url:
        return None
    api_url = 'http://backend.deviantart.com/oembed?url={0}'.format(url)
    raw = web.get(api_url)
    return json.loads(raw)
def query_lastfm(bot, **kwargs):
    """Call the Last.fm 2.0 API with the given query parameters.

    The configured API key and JSON format are appended automatically;
    returns the raw response body.
    """
    params = ["{}={}".format(key, kwargs[key]) for key in kwargs]
    params.append("api_key={}".format(get_api_key(bot)))
    params.append("format=json")
    url = "http://ws.audioscrobbler.com/2.0/?" + "&".join(params)
    return web.get(url)
def query_lastfm(bot, **kwargs):
    """Call the Last.fm 2.0 API with the given query parameters.

    The configured API key and JSON format are appended automatically;
    returns the raw response body.
    """
    args = []
    for k in kwargs:
        args.append("{}={}".format(k, kwargs[k]))
    args.append('api_key={}'.format(get_api_key(bot)))
    args.append('format=json')
    url = 'http://ws.audioscrobbler.com/2.0/?' + '&'.join(args)
    return web.get(url)
def duck_search(query):
    """Return the first DuckDuckGo HTML result URL for *query*, or None."""
    # Strip '!' so bang syntax is searched literally, then URL-encode.
    cleaned = web.quote(query.replace('!', ''))
    page = web.get('http://duckduckgo.com/html/?q=%s&kl=uk-en' % cleaned)
    hit = r_duck.search(page)
    if hit:
        return web.decode(hit.group(1))
def duck_search(query):
    """Return the first DuckDuckGo HTML result URL for *query*, or None."""
    query = query.replace('!', '')  # strip bang syntax; search literally
    uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query
    bytes = web.get(uri)
    if 'web-result"' in bytes:  # filter out the ads on top of the page
        bytes = bytes.split('web-result"')[1]
    m = r_duck.search(bytes)
    if m:
        return web.decode(m.group(1))
def get_info(number=None):
    """Fetch xkcd comic metadata (the latest comic when *number* is None).

    Adds a 'url' key pointing at the comic's page.
    """
    if number:
        target = 'http://xkcd.com/{}/info.0.json'.format(number)
    else:
        target = 'http://xkcd.com/info.0.json'
    info = json.loads(web.get(target))
    info['url'] = 'http://xkcd.com/' + str(info['num'])
    return info
def duck_api(query):
    """Query the DuckDuckGo Instant Answer API for *query*.

    Returns a redirect URL when the API supplies one, otherwise None.
    Bang queries are pointed at the bang reference page instead.
    """
    if '!bang' in query.lower():
        return 'https://duckduckgo.com/bang.html'
    uri = 'http://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query
    results = json.loads(web.get(uri))
    # Robustness: 'Redirect' may be missing from the response entirely;
    # .get() avoids a KeyError and an empty string still maps to None.
    return results.get('Redirect') or None
def duck_search(query):
    """Return the first DuckDuckGo HTML result URL for *query*, or None."""
    query = query.replace('!', '')  # strip bang syntax; search literally
    uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en' % query
    # Spoof a browser user agent; DDG blocks default library agents.
    bytes = web.get(uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'})
    if 'web-result' in bytes:  # filter out the ads on top of the page
        bytes = bytes.split('web-result')[1]
    m = r_duck.search(bytes)
    if m:
        # Result hrefs are URL-encoded redirect targets; unquote first.
        unquoted_m = unquote(m.group(1))
        return web.decode(unquoted_m)
def insult(bot, trigger):
    """
    .insult [target]
    """
    # Fetch one random insult from the quandyfactory JSON endpoint.
    payload = json.loads(web.get('http://quandyfactory.com/insult/json'))
    # Address the named target if one was given, otherwise the caller.
    target = trigger.group(3) or trigger.nick
    bot.say(target + ', ' + payload['insult'])
def request(self, input):
    """Send a GET request to the API and return the parsed JSON body.

    Only publicly available data is accessible (client-ID auth, no user
    token).
    """
    auth_headers = {
        'Accept': 'application/json',
        'Authorization': 'Client-ID ' + self.client_id,
    }
    response = web.get(self.api_url + input, headers=auth_headers)
    # FIXME: raise for status
    return json.loads(response)
def github_repo(bot, trigger, match=None):
    """Handle a GitHub repo lookup.

    Recognizes the special keywords 'version', 'status' and 'rate-limit';
    anything else is treated as a repository name ('owner/repo', defaulting
    the owner to the caller's nick) and passed on to ``fmt_response``.
    """
    match = match or trigger
    repo = match.group(2) or match.group(1)
    keyword = repo.lower()

    if keyword == 'version':
        return bot.say('[idlerpg] Version {} by {}, report issues at {}'.format(
            github.__version__, github.__author__, github.__repo__))

    if keyword == 'status':
        # IRC color-coded labels for the three known status levels; anything
        # else passes through unchanged.
        labels = {
            'major': "\x02\x034Broken\x03\x02",
            'minor': "\x02\x037Shakey\x03\x02",
            'good': "\x02\x033Online\x03\x02",
        }
        current = json.loads(web.get('https://status.github.com/api/status.json'))
        lastcomm = json.loads(web.get('https://status.github.com/api/last-message.json'))
        status = labels.get(current['status'], current['status'])
        lstatus = labels.get(lastcomm['status'], lastcomm['status'])
        # Render the last-message timestamp in the caller's timezone.
        timezone = get_timezone(bot.db, bot.config, None, trigger.nick) or 'UTC'
        lastcomm['created_on'] = format_time(bot.db, bot.config, timezone,
                                             trigger.nick, trigger.sender,
                                             from_utc(lastcomm['created_on']))
        return bot.say('[Github] Current Status: ' + status
                       + ' | Last Message: ' + lstatus + ': ' + lastcomm['body']
                       + ' (' + lastcomm['created_on'] + ')')

    if keyword == 'rate-limit':
        return bot.say(fetch_api_endpoint(bot, 'https://api.github.com/rate_limit'))

    # Bare repo name: assume the caller owns it.
    if '/' not in repo:
        repo = trigger.nick.strip() + '/' + repo
    URL = 'https://api.github.com/repos/%s' % (repo.strip())
    fmt_response(bot, trigger, URL)
def duck_search(query):
    """Return every result URL found on the DuckDuckGo HTML page (UK locale).

    Bang syntax ('!') is stripped from the query. Unlike the single-result
    variants in this file, this one returns a list of all decoded
    ``r_duck`` matches and does not filter out the ad block at the top of
    the page. The query is interpolated unescaped — assumed URL-safe;
    verify against callers.
    """
    page = web.get('http://duckduckgo.com/html/?q=%s&kl=uk-en'
                   % query.replace('!', ''))
    return [web.decode(hit) for hit in r_duck.findall(page)]
def suggest(bot, trigger):
    """Suggest terms starting with given input"""
    query = trigger.group(2)
    if not query:
        return bot.reply("No query term.")
    # Only '+' is escaped here (it would otherwise read as a space in the
    # query string); the rest of the term is sent as-is.
    answer = web.get('http://websitedev.de/temp-bin/suggest.pl?q='
                     + query.replace('+', '%2B'))
    if answer:
        bot.say(answer)
    else:
        bot.reply('Sorry, no result.')
def get_rate(code):
    """Return ``(rate, name)`` converting one unit of *code* into CAD.

    CAD and BTC are special-cased; any other code is looked up in the Bank
    of Canada noon-rate RSS feed. Returns ``(False, False)`` when the feed
    reports 404 for the code.
    """
    normalized = code.upper()
    if normalized == 'CAD':
        return 1, 'Canadian Dollar'
    if normalized == 'BTC':
        rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))
        return 1 / rates['CAD']['24h_avg'], 'Bitcoin—24hr average'
    # The feed URL is built from the code exactly as given (original case).
    data, headers = web.get(base_url.format(code), dont_decode=True,
                            return_headers=True)
    if headers['_http_status'] == 404:
        return False, False
    namespaces = {
        'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb',
        'http://purl.org/rss/1.0/': None,
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
    }
    doc = xmltodict.parse(data, process_namespaces=True,
                          namespaces=namespaces).get('rdf:RDF')
    # Channel title looks like "Bank of Canada noon rate: <name> (noon)".
    title = doc.get('channel').get('title').get('#text')
    name = re.sub(r'\s*\(noon\)\s*', '',
                  title[len('Bank of Canada noon rate: '):])
    rate = (doc.get('item').get('cb:statistics').get('cb:exchangeRate')
            .get('cb:value').get('#text'))
    return float(rate), name
def isup(bot, trigger):
    """Check whether a website responds, replying in the configured language.

    Fix: the scheme check compared slices of the wrong length
    (``site[:6] != 'http://'`` and ``site[:7] != 'https://'`` — the literals
    are 7 and 8 characters long, so the comparisons could never be equal),
    which meant every URL with an explicit http(s) scheme was wrongly
    rejected with the "try it again without the protocol" reply. Use
    ``str.startswith`` with a tuple of schemes instead.
    """
    site = trigger.group(2)
    if not site:
        if bot.config.lang == 'fr':
            return bot.reply("Quel website veux-tu que je verifie?")
        elif bot.config.lang == 'es':
            return bot.reply("Que web quieres que compruebe?")
        else:
            return bot.reply("What web do you want to check?")
    # Refuse to probe loopback addresses.
    if 'localhost' in site or '127.0.0.1' in site or '0::1' in site:
        bot.reply("I'm minding on not say you it.")
        return
    elif not site.startswith(('http://', 'https://')):
        if '://' in site:
            # Some other scheme: ask the caller to drop it.
            protocol = site.split('://')[0] + '://'
            if bot.config.lang == 'fr':
                return bot.reply("Tournez à tenter sans le %s" % protocol)
            elif bot.config.lang == 'es':
                return bot.reply("Vuelve a intentar sin el %s" % protocol)
            else:
                return bot.reply("Try it again without the %s" % protocol)
        else:
            if is_http_url(site) is False:
                return bot.reply("That URL looks not valid for me.")
            site = 'http://' + site
    try:
        response = web.get(site)
    except Exception:
        if bot.config.lang == 'fr':
            bot.say('Sembla que ' + site + ' ne fonctionne pas ou n\'existe pas.')
        elif bot.config.lang == 'es':
            bot.say('Parece que ' + site + ' no funciona o no existe.')
        else:
            bot.say(site + ' looks down from here.')
        return
    if response:
        if bot.config.lang == 'fr':
            bot.say('Il n\'y a pas d\'aucun problème à ' + site)
        elif bot.config.lang == 'es':
            bot.say('No veo ningun problema en ' + site)
        else:
            bot.say(site + ' looks fine to me.')
    else:
        if bot.config.lang == 'fr':
            bot.say('Semble que ' + site + ' ne fonctionne pas ou n\'existe pas.')
        elif bot.config.lang == 'es':
            bot.say('Parece que ' + site + ' no funciona o no existe.')
        else:
            bot.say(site + ' looks down from here.')
        return
def request(self, input):
    """
    Sends a request to the API. Only publicly available data is
    accessible. Returns data as JSON.
    """
    body = web.get(
        self.api_url + input,
        headers={'Authorization': 'Client-ID ' + self.client_id,
                 'Accept': 'application/json'})
    # FIXME: raise for status
    return json.loads(body)
def urbansearch(bot, searchterm):
    """Search Urban Dictionary and return a formatted definition string.

    :param bot: bot instance (kept for interface compatibility)
    :param searchterm: term to look up
    :return: '<definition> - <url>' for the first hit, or an error/no-result
             message string

    Fixes:
    - the original bare ``except`` handler referenced ``trigger``, which is
      not in scope in this function, so any network or JSON failure raised a
      NameError; catch ``Exception`` narrowly around the fetch/parse and
      return the error text so the caller can relay it.
    - guard against an empty ``list`` in the response to avoid an
      IndexError.
    """
    try:
        raw = web.get("http://api.urbandictionary.com/v0/define?term={0}".format(
            web.quote(searchterm)))
        data = json.loads(raw)
    except Exception:
        return "Error connecting to urban dictionary"
    if data['result_type'] == 'no_results' or not data.get('list'):
        return "No results found for {0}".format(searchterm)
    result = data['list'][0]
    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(
        web.quote(searchterm))
    # Definitions can be long; cap at 256 characters for IRC.
    response = "{0} - {1}".format(result['definition'].strip()[:256], url)
    return response
def py(bot, trigger):
    """Evaluate a Python expression."""
    expression = trigger.group(2)
    if not expression:
        return bot.say("Need an expression to evaluate")
    answer = web.get(BASE_TUMBOLIA_URI + 'py/' + web.quote(expression))
    if answer:
        # bot.say can potentially lead to 3rd party commands triggering.
        bot.reply(answer)
    else:
        bot.reply('Sorry, no result.')