def osu_beatmap(bot, trigger):
    data = '?k=%s&%s=%s' % (bot.config.osu.api_key,
                            str(trigger.group(1)), str(trigger.group(2)))
    raw = web.get('https://osu.ppy.sh/api/get_beatmaps' + data)
    topscore = None
    if trigger.group(1) == 'b':
        rawscore = web.get('https://osu.ppy.sh/api/get_scores' + data)
        scores = json.loads(rawscore)
        topscore = scores[0] if scores else None
    response = json.loads(raw)
    if not response or not response[0]:
        bot.say('[' + color('osu!', u'13') + '] Invalid link')
        return
    beatmap = response[0]
    m, s = divmod(int(beatmap['total_length']), 60)
    output = [
        '[', color('osu!', u'13'), '] ',
        beatmap['artist'], ' - ', beatmap['title'],
        ' (Mapped by ', beatmap['creator'], ') | ',
        str(m), 'm, ', str(s), 's', ' | ',
        beatmap['version'], ' | Difficulty: ', beatmap['difficultyrating'],
        ' | ', beatmap['bpm'], ' BPM'
    ]
    if topscore:
        # append as a single string; += on a list would splice in the
        # individual characters
        output.append(' | High Score: ' + topscore['score'] + ' (' +
                      topscore['rank'] + ') - ' + topscore['username'])
    bot.say(''.join(output))
def get_rate(code):
    if code == 'CAD':
        return 1, 'Canadian Dollar'
    elif code == 'BTC':
        rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))
        return 1 / rates['CAD']['24h_avg'], 'Bitcoin (24h average)'
    elif code == 'XDG':
        rates = json.loads(web.get('http://dogecoinaverage.com/USD.json'))
        return 1 / rates['CAD']['24h_avg'], 'Dogecoin (24h average)'

    data, headers = web.get(base_url.format(code), dont_decode=True,
                            return_headers=True)
    if headers['_http_status'] == 404:
        return False, False
    xml = etree.fromstring(data)
    namestring = xml.find('{http://purl.org/rss/1.0/}channel/'
                          '{http://purl.org/rss/1.0/}title').text
    name = namestring[len('Bank of Canada noon rate: '):]
    name = re.sub(r'\s*\(noon\)\s*', '', name)
    rate = xml.find(
        '{http://purl.org/rss/1.0/}item/'
        '{http://www.cbwiki.net/wiki/index.php/Specification_1.1}statistics/'
        '{http://www.cbwiki.net/wiki/index.php/Specification_1.1}exchangeRate/'
        '{http://www.cbwiki.net/wiki/index.php/Specification_1.1}value').text
    return float(rate), name
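# Usage sketch (not part of the module): get_rate() returns a
# (units-per-CAD, display name) pair, so converting between two arbitrary
# codes goes through CAD as the pivot. The helper name and the .upper()
# normalization below are illustrative assumptions.
def convert_amount(amount, of_code, to_code):
    of_rate, of_name = get_rate(of_code.upper())
    to_rate, to_name = get_rate(to_code.upper())
    if not of_rate or not to_rate:
        return None
    # amount / of_rate converts into CAD; multiplying by to_rate converts
    # CAD into the target currency
    return amount / of_rate * to_rate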
def radio(bot, trigger):
    """ Radio functions, valid parameters: on, off, song, now, next, soon, stats. """
    global checkSongs, current_song, radioURL
    if not radioURL:
        if not hasattr(bot.config, "radio"):
            bot.say("Radio module not configured")
            return
        else:
            radioURL = bot.config.radio.url + "%s?sid=" + bot.config.radio.sid
    try:
        args = trigger.group(2).lower().split(" ")
    except AttributeError:
        bot.say("Usage: .radio (next|now|off|on|song|soon|stats)")
        return
    if args[0] == "on":
        if bot.privileges[trigger.sender][trigger.nick] < OP:
            return
        if checkSongs != 0:
            return bot.reply("Radio data checking is already on.")
        if not getAPI(bot, trigger):
            checkSongs = 0
            return bot.say("Radio data checking not enabled.")
        checkSongs = 10
        while checkSongs:
            last = current_song
            try:
                current_song = web.get(radioURL % "currentsong")
                nextsong = web.get(radioURL % "nextsong")
            except Exception as e:
                checkSongs -= 1
                if checkSongs == 0:
                    bot.debug(__file__,
                              "Exception while trying to get periodic radio data: %s" % e,
                              "warning")
                    bot.say("The radio is not responding to the song request.")
                    bot.say("Turning off radio data checking.")
                    break
            if current_song != last:
                if not current_song:
                    csong = "The radio is currently offline."
                else:
                    csong = "Now Playing: " + current_song
                if nextsong and current_song:
                    bot.say(csong + " | Coming Up: " + nextsong)
                else:
                    bot.say(csong)
            sleep(5)
    elif args[0] == "off":
        if bot.privileges[trigger.sender][trigger.nick] < OP:
            return
        if checkSongs == 0:
            bot.reply("Radio data checking is already off.")
            return
        checkSongs = 0
        current_song = ""
        bot.reply("Turning off radio data checking.")
    elif args[0] == "song" or args[0] == "now":
        currentSong(bot, trigger)
    elif args[0] == "next" or args[0] == "soon":
        nextSong(bot, trigger)
    elif args[0] == "stats":
        getAPI(bot, trigger)
def report_bug(bot, bugno):
    entry = json.loads(web.get(query_url % bugno))
    bot.say('Bug %s%s %s' % (bug_url, bugno, ', '.join(
        (entry.get('assigned_to', {'name': '---'})['name'],
         entry.get('status', '---'),
         entry.get('resolution', '---'),
         entry.get('priority', '---'),
         entry.get('summary', '---')))))
def radio(bot, trigger):
    """ Radio functions, valid parameters: on, off, song, now, next, soon, stats. """
    global checkSongs, current_song, radioURL
    if not radioURL:
        if not hasattr(bot.config, 'radio'):
            bot.say('Radio module not configured')
            return
        else:
            radioURL = bot.config.radio.url + '%s?sid=' + bot.config.radio.sid
    try:
        args = trigger.group(2).lower().split(' ')
    except AttributeError:
        bot.say('Usage: .radio (next|now|off|on|song|soon|stats)')
        return
    if args[0] == 'on':
        if not trigger.isop:
            return
        if checkSongs != 0:
            return bot.reply('Radio data checking is already on.')
        if not getAPI(bot, trigger):
            checkSongs = 0
            return bot.say('Radio data checking not enabled.')
        checkSongs = 10
        while checkSongs:
            last = current_song
            try:
                current_song = web.get(radioURL % 'currentsong')
                nextsong = web.get(radioURL % 'nextsong')
            except Exception as e:
                checkSongs -= 1
                if checkSongs == 0:
                    bot.debug(__file__,
                              'Exception while trying to get periodic radio data: %s' % e,
                              'warning')
                    bot.say('The radio is not responding to the song request.')
                    bot.say('Turning off radio data checking.')
                    break
            if current_song != last:
                if not current_song:
                    csong = 'The radio is currently offline.'
                else:
                    csong = 'Now Playing: ' + current_song
                if nextsong and current_song:
                    bot.say(csong + ' | Coming Up: ' + nextsong)
                else:
                    bot.say(csong)
            sleep(5)
    elif args[0] == 'off':
        if not trigger.isop:
            return
        if checkSongs == 0:
            bot.reply('Radio data checking is already off.')
            return
        checkSongs = 0
        current_song = ''
        bot.reply('Turning off radio data checking.')
    elif args[0] == 'song' or args[0] == 'now':
        currentSong(bot, trigger)
    elif args[0] == 'next' or args[0] == 'soon':
        nextSong(bot, trigger)
    elif args[0] == 'stats':
        getAPI(bot, trigger)
def lastfm(willie, trigger):
    user = trigger.group(2)
    apikey = str(willie.config.lastfm.apikey)
    if not user:
        if trigger.nick in willie.db.preferences:
            user = willie.db.preferences.get(trigger.nick, 'lastfm_user')
        if not user:
            willie.reply("Invalid username given or no username set. "
                         "Use .fmset to set a username.")
            return
    # username prepared for insertion into the REST query string
    quoted_user = web.quote(user)
    # JSON-formatted output for the most recent track
    recent_page = web.get("http://ws.audioscrobbler.com/2.0/"
                          "?method=user.getrecenttracks&user=%s"
                          "&api_key=%s&format=json" % (quoted_user, apikey))
    recent_track = json.loads(recent_page)['recenttracks']['track'][0]
    # artist and track name pulled from recent_track
    quoted_artist = web.quote(recent_track['artist']['#text'])
    quoted_track = web.quote(recent_track['name'])
    # JSON-formatted track info
    trackinfo_page = web.get("http://ws.audioscrobbler.com/2.0/"
                             "?method=track.getInfo&artist=%s&track=%s"
                             "&username=%s&api_key=%s&format=json"
                             % (quoted_artist, quoted_track, quoted_user, apikey))
    # track playcount and loved stats
    trackinfo = json.loads(trackinfo_page)['track']
    try:
        playcount = trackinfo['userplaycount']
    except KeyError:
        playcount = "unknown"
    # use .get() so a missing 'userloved' key can't raise outside the try below
    loved = int(trackinfo.get('userloved', 0))
    try:
        if loved > 0:
            willie.say('\x035' + u'\u2665' + '\x03 %s - %s - (%s plays)'
                       % (recent_track['artist']['#text'],
                          recent_track['name'], playcount))
        else:
            willie.say(u'\u266A' + ' %s - %s (%s plays)'
                       % (recent_track['artist']['#text'],
                          recent_track['name'], playcount))
    except KeyError:
        willie.say("Couldn't find any recent tracks")
def search(title):
    if is_integer(title.strip()):
        # a single-ID lookup returns a bare object; wrap it in a list so both
        # branches return the same shape
        response = web.get('https://mal-api.test.ramblingahoge.net/anime/'
                           + web.quote(title), verify_ssl=False)
        return json.loads('[' + response + ']')
    else:
        response = web.get('https://mal-api.test.ramblingahoge.net/anime/search?q='
                           + web.quote(title), verify_ssl=False)
        return json.loads(response)
def get_content(phrase, mode, period="day"):
    subreddit = phrase.lower()
    if " " in phrase:
        subreddit_find_string = phrase.replace(" ", "+")
        if subreddit_find_string not in resolved_subreddit:
            url = json.loads(web.get(
                "http://www.reddit.com/subreddits/search.json?q={0}".format(
                    subreddit_find_string)))
            result = [x['data']['display_name']
                      for x in url['data']['children']
                      if 'data' in x and 'display_name' in x['data']
                      and x['data']['subreddit_type'] != "private"]
            if len(result) > 0:
                subreddit = result[0].lower()
                resolved_subreddit[subreddit_find_string] = subreddit
            else:
                return ("I looked for a public subreddit matching that "
                        "phrase but didn't find one.", None)
        else:
            subreddit = resolved_subreddit[subreddit_find_string]
    if subreddit not in last_seen:
        last_seen[subreddit] = {}
    url = ("http://www.reddit.com/r/{0}/search.json"
           "?q=site%3Aimgur.com&restrict_sr=on&sort={1}&t={2}").format(
               subreddit, mode, period)
    get = web.get(url, timeout=5)
    try:
        array = json.loads(get)
    except ValueError:
        return "{0} doesn't look like a subreddit to me.".format(subreddit), subreddit
    if 'error' in array:
        if array['error'] == 404:
            return "{0} isn't a real subreddit.".format(subreddit), subreddit
        elif array['error'] == 403:
            return "{0} is a private subreddit.".format(subreddit), subreddit
        else:
            return "Unknown error. Whoops.", subreddit
    else:
        links = []
        iterator = 0
        if 'children' in array['data'] and len(array['data']['children']) > 0:
            # bound the scan by the number of children, not by the size of
            # the top-level JSON object
            while len(links) < 10 and iterator < len(array['data']['children']):
                for child in array['data']['children']:
                    iterator = iterator + 1
                    if child['data']['domain'] == 'i.imgur.com':
                        if 'over_18' in child['data']:
                            id = child['data']['id']
                            if id in last_seen[subreddit]:
                                child['data']['lastseen'] = last_seen[subreddit][id]
                            else:
                                child['data']['lastseen'] = 0
                            links.append(child['data'])
        return links, subreddit
def github_repo(bot, trigger, match=None):
    match = match or trigger
    repo = match.group(2) or match.group(1)

    if repo.lower() == "status":
        current = json.loads(web.get("https://status.github.com/api/status.json"))
        lastcomm = json.loads(web.get("https://status.github.com/api/last-message.json"))

        status = current["status"]
        if status == "major":
            status = "\x02\x034Broken\x03\x02"
        elif status == "minor":
            status = "\x02\x037Shakey\x03\x02"
        elif status == "good":
            status = "\x02\x033Online\x03\x02"

        lstatus = lastcomm["status"]
        if lstatus == "major":
            lstatus = "\x02\x034Broken\x03\x02"
        elif lstatus == "minor":
            lstatus = "\x02\x037Shakey\x03\x02"
        elif lstatus == "good":
            lstatus = "\x02\x033Online\x03\x02"

        timezone = get_timezone(bot.db, bot.config, None, trigger.nick)
        if not timezone:
            timezone = "UTC"
        lastcomm["created_on"] = format_time(bot.db, bot.config, timezone,
                                             trigger.nick, trigger.sender,
                                             from_utc(lastcomm["created_on"]))

        return bot.say("[Github] Current Status: " + status +
                       " | Last Message: " + lstatus + ": " +
                       lastcomm["body"] + " (" + lastcomm["created_on"] + ")")
    elif repo.lower() == "rate-limit":
        return bot.say(fetch_api_endpoint(bot, "https://api.github.com/rate_limit"))

    if "/" not in repo:
        repo = trigger.nick.strip() + "/" + repo
    URL = "https://api.github.com/repos/%s" % (repo.strip())

    fmt_response(bot, trigger, URL)
def fetch_video_info(bot, id):
    """Retrieves video metadata from YouTube"""
    url = INFO_URL.format(get_api_key(bot), id)
    raw, headers = web.get(url, return_headers=True)
    if headers['_http_status'] == 403:
        bot.say(u'[YouTube Search] Access denied. Check that your API key is '
                u'configured to allow access to your IP address.')
        return
    try:
        result = json.loads(raw)
    except ValueError:
        raise YouTubeError(u'Failed to decode: ' + raw)
    if 'error' in result:
        raise YouTubeError(result['error']['message'])
    if len(result['items']) == 0:
        raise YouTubeError('YouTube API returned empty result')
    video = result['items'][0]
    info = {
        'title': video['snippet']['title'],
        # 'uploader': video['snippet']['channelTitle'],
        # 'uploaded': convert_date(video['snippet']['publishedAt']),
        'duration': convert_duration(video['contentDetails']['duration']),
        'views': video['statistics']['viewCount'],
        # 'comments': video['statistics']['commentCount'],
        # 'likes': video['statistics']['likeCount'],
        # 'dislikes': video['statistics']['dislikeCount'],
        'link': 'https://youtu.be/' + video['id']
    }
    return info
def bing_search(query, lang='en-GB'):
    query = web.quote(query)
    base = 'http://www.bing.com/search?mkt=%s&q=' % lang
    bytes = web.get(base + query)
    m = r_bing.search(bytes)
    if m:
        return m.group(1)
def wiktionary(word):
    bytes = web.get(uri % web.quote(word.encode('utf-8')))
    bytes = r_ul.sub('', bytes)

    mode = None
    etymology = None
    definitions = {}
    for line in bytes.splitlines():
        if 'id="Etymology"' in line:
            mode = 'etymology'
        elif 'id="Noun"' in line:
            mode = 'noun'
        elif 'id="Verb"' in line:
            mode = 'verb'
        elif 'id="Adjective"' in line:
            mode = 'adjective'
        elif 'id="Adverb"' in line:
            mode = 'adverb'
        elif 'id="Interjection"' in line:
            mode = 'interjection'
        elif 'id="Particle"' in line:
            mode = 'particle'
        elif 'id="Preposition"' in line:
            mode = 'preposition'
        elif 'id="' in line:
            mode = None
        elif (mode == 'etymology') and ('<p>' in line):
            # was misspelled 'etmyology', so the etymology was never captured
            etymology = text(line)
        elif (mode is not None) and ('<li>' in line):
            definitions.setdefault(mode, []).append(text(line))
        if '<hr' in line:
            break
    return etymology, definitions
def wikt(word):
    bytes = web.get(uri % web.quote(word))
    bytes = r_ul.sub('', bytes)

    mode = None
    etymology = None
    definitions = {}
    for line in bytes.splitlines():
        if 'id="Etymology"' in line:
            mode = 'etymology'
        elif 'id="Noun"' in line:
            mode = 'noun'
        elif 'id="Verb"' in line:
            mode = 'verb'
        elif 'id="Adjective"' in line:
            mode = 'adjective'
        elif 'id="Adverb"' in line:
            mode = 'adverb'
        elif 'id="Interjection"' in line:
            mode = 'interjection'
        elif 'id="Particle"' in line:
            mode = 'particle'
        elif 'id="Preposition"' in line:
            mode = 'preposition'
        elif 'id="' in line:
            mode = None
        elif (mode == 'etymology') and ('<p>' in line):
            # was misspelled 'etmyology', so the etymology was never captured
            etymology = text(line)
        elif (mode is not None) and ('<li>' in line):
            definitions.setdefault(mode, []).append(text(line))
        if '<hr' in line:
            break
    return etymology, definitions
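# Both parsers above lean on module-level helpers that are not shown in this
# file. The definitions below are a plausible reconstruction, not the
# originals: r_ul drops <ul> blocks so list markup doesn't pollute the
# definitions, and text() flattens a line of HTML into plain text.
import re

r_tag = re.compile(r'<[^>]+>')
r_ul = re.compile(r'(?ims)<ul>.*?</ul>')


def text(html):
    plain = r_tag.sub('', html).strip()
    plain = plain.replace('&nbsp;', ' ')
    return plain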
def show_bug(bot, trigger, match=None, bug_url=None):
    """Show information about a Bugzilla bug."""
    if bug_url is None:
        match = match or trigger
        domain = match.group(1)
        if domain not in bot.config.bugzilla.get_list('domains'):
            return
        url = 'https://%s%sctype=xml&%s' % match.groups()
    else:
        url = bug_url
    data = web.get(url)
    bug = etree.fromstring(data).find('bug')
    if bug.get('error'):
        bot.reply("That bug id is sketchy, man.")
        return
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | '
               'Importance: %s | Status: %s | Assigned to: %s | '
               'Reported: %s | Modified: %s')
    resolution = bug.find('resolution')
    if resolution is not None and resolution.text:
        status = bug.find('bug_status').text + ' ' + resolution.text
    else:
        status = bug.find('bug_status').text
    message = message % (
        bug.find('short_desc').text, bug.find('product').text,
        bug.find('component').text, bug.find('version').text,
        (bug.find('priority').text + ' ' + bug.find('bug_severity').text),
        status, bug.find('assigned_to').text, bug.find('creation_ts').text,
        bug.find('delta_ts').text)
    bot.say(message)
def fetch_video_info(bot, id):
    """Retrieves video metadata from YouTube"""
    url = INFO_URL.format(get_api_key(bot), id)
    raw = web.get(url)
    result = json.loads(raw)
    if 'error' in result:
        raise YouTubeError(result['error']['message'])
    if len(result['items']) == 0:
        raise YouTubeError('YouTube API returned empty result')
    video = result['items'][0]
    info = {
        'title': video['snippet']['title'],
        'uploader': video['snippet']['channelTitle'],
        'uploaded': convert_date(video['snippet']['publishedAt']),
        'duration': convert_duration(video['contentDetails']['duration']),
        'views': video['statistics']['viewCount'],
        'comments': video['statistics']['commentCount'],
        'likes': video['statistics']['likeCount'],
        'dislikes': video['statistics']['dislikeCount'],
        'link': 'https://youtu.be/' + video['id']
    }
    return info
def isup(bot, trigger):
    """isup.me website status checker"""
    site = trigger.group(2)
    if not site:
        return bot.reply("What site do you want to check?")

    # the original sliced 6 and 7 characters against 7- and 8-character
    # prefixes, so the checks could never match; use startswith instead
    if not site.startswith('http://') and not site.startswith('https://'):
        if '://' in site:
            protocol = site.split('://')[0] + '://'
            return bot.reply("Try it again without the %s" % protocol)
        else:
            site = 'http://' + site

    if '.' not in site:
        site += ".com"

    try:
        response = web.get(site)
    except Exception:
        bot.say(site + ' looks down from here.')
        return

    if response:
        bot.say(site + ' looks fine to me.')
    else:
        bot.say(site + ' is down from here.')
def movie(bot, trigger):
    """
    Returns some information about a movie, like Title, Year, Rating,
    Genre and IMDB Link.
    """
    if not trigger.group(2):
        return
    word = trigger.group(2).rstrip()
    uri = "http://www.imdbapi.com/?t=" + word
    u = web.get(uri, 30)
    data = json.loads(u)  # data is a dict containing all the information we need
    if data['Response'] == 'False':
        if 'Error' in data:
            message = '[MOVIE] %s' % data['Error']
        else:
            LOGGER.warning(
                'Got an error from the imdb api, search phrase was %s; data was %s',
                word, str(data))
            message = '[MOVIE] Got an error from imdbapi'
    else:
        message = ('[MOVIE] Title: ' + data['Title'] +
                   ' | Year: ' + data['Year'] +
                   ' | Rating: ' + data['imdbRating'] +
                   ' | Genre: ' + data['Genre'] +
                   ' | IMDB Link: http://imdb.com/title/' + data['imdbID'])
    bot.say(message)
def info(genre, title):
    if ".mp3" not in title:
        title = title + ".mp3"
    genre = urllib.unquote(genre)
    title = urllib.unquote(title)
    request = web.get("http://jpv.everythingisawesome.us/api/v1/song/" +
                      genre + "/" + title)
    return json.loads(request)
def search(bot, trigger, title):
    request = web.get("http://jpv.everythingisawesome.us/api/v1/search/" + title)
    songs = json.loads(request)
    if len(songs) > 0:
        # the notice claims a limit of 3, so trigger it whenever more than 3
        # results come back (the original checked > 5)
        if len(songs) > 3:
            bot.say("[JPV] Search results limited to 3 results, see "
                    "http://jpv.everythingisawesome.us/api/v1/search/" + title +
                    " for a full list")
        songs = songs[:3]
        for song in songs:
            bot.say("[JPV] " + song["artist"] + " - " + song["title"] +
                    " | " + song["album"] + " | " + song["album_artist"] +
                    " | " + song["genre"] +
                    " | http://jpv.everythingisawesome.us/song/?song={}".format(
                        song["href"].replace(".mp3", "")))
    else:
        bot.say('[JPV] Unable to find a song matching "' +
                trigger.group(2).strip() + '"')
def ytsearch(bot, trigger):
    """ .youtube <query> - Search YouTube """
    if not trigger.group(2):
        return
    uri = ('https://www.googleapis.com/youtube/v3/search'
           '?part=snippet&type=video&q=' + trigger.group(2))
    raw = web.get(uri + '&key=' + bot.config.google.public_key)
    vid = json.loads(raw)['items'][0]['id']['videoId']
    uri = ('https://www.googleapis.com/youtube/v3/videos?id=' + vid +
           '&part=contentDetails,snippet,statistics')
    video_info = ytget(bot, trigger, uri)
    if video_info is None:
        return

    message = ('[YT Search] Title: ' + video_info['snippet']['title'] +
               ' | Uploader: ' + video_info['snippet']['channelTitle'] +
               ' | Uploaded: ' + video_info['snippet']['publishedAt'] +
               ' | Duration: ' + video_info['contentDetails']['duration'] +
               ' | Views: ' + video_info['statistics']['viewCount'] +
               ' | Comments: ' + video_info['statistics']['commentCount'] +
               ' | ' + color(video_info['statistics']['likeCount'] + '+',
                             colors.GREEN) +
               ' | ' + color(video_info['statistics']['dislikeCount'] + '-',
                             colors.RED) +
               ' | Link: https://youtu.be/' + video_info['id'])
    bot.say(message)
def mw_snippet(server, query, bot):
    """
    Retrieves a snippet of the specified length from the given page on the
    given server.
    """
    # the three URL literals differed only in the language subdomain
    lang = bot.config.lang if bot.config.lang in ('ca', 'es') else 'en'
    snippet_url = ('https://%s.wikipedia.org/w/api.php?format=json'
                   '&action=query&prop=extracts&exintro&explaintext'
                   '&exchars=300&redirects&titles=' % lang)
    if lang in ('ca', 'es'):
        snippet_url += web.quote(query.encode('utf-8'))
    else:
        snippet_url += web.quote(query.encode('cp1252'))
    snippet = json.loads(web.get(snippet_url))
    snippet = snippet['query']['pages']
    # For some reason, the API gives the page *number* as the key, so we just
    # grab the first page number in the results.
    snippet = snippet[snippet.keys()[0]]
    return snippet['extract']
def find_title(url):
    """Return the title for the given URL."""
    content = web.get(url)
    # Some cleanup that I don't really grok, but was in the original, so
    # we'll keep it (with the compiled regexes made global) for now.
    content = title_tag_data.sub(r'<\1title>', content)
    content = quoted_title.sub('', content)

    start = content.find('<title>')
    end = content.find('</title>')
    if start == -1 or end == -1:
        return
    title = content[start + 7:end]
    title = title.strip()[:200]

    def get_unicode_entity(match):
        entity = match.group()
        if entity.startswith('&#x'):
            cp = int(entity[3:-1], 16)
        elif entity.startswith('&#'):
            cp = int(entity[2:-1])
        else:
            cp = name2codepoint[entity[1:-1]]
        return unichr(cp)

    title = r_entity.sub(get_unicode_entity, title)
    title = ' '.join(title.split())  # cleanly remove multiple spaces

    # More cryptic regex substitutions. This one looks to be myano's invention.
    title = re_dcc.sub('', title)

    return title or None
def find_title(url=None, content=None):
    """Return the title for the given URL.

    Copy of find_title that allows for avoiding duplicate requests."""
    if (not content and not url) or (content and url):
        raise ValueError('url *or* content needs to be provided to find_title')
    if url:
        try:
            content, headers = web.get(url, return_headers=True,
                                       limit_bytes=max_bytes)
        except UnicodeDecodeError:
            return  # Fail silently when data can't be decoded
    assert content

    # Some cleanup that I don't really grok, but was in the original, so
    # we'll keep it (with the compiled regexes made global) for now.
    content = title_tag_data.sub(r'<\1title>', content)
    content = quoted_title.sub('', content)

    start = content.find('<title>')
    end = content.find('</title>')
    if start == -1 or end == -1:
        return
    title = web.decode(content[start + 7:end])
    title = title.strip()[:200]

    title = ' '.join(title.split())  # cleanly remove multiple spaces

    # More cryptic regex substitutions. This one looks to be myano's invention.
    title = re_dcc.sub('', title)

    return title or None
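# Usage sketch: because this find_title() accepts pre-fetched content, a
# caller that already holds a response body can skip the second request.
# The helper name and URL below are illustrative assumptions.
def _example_title_from_body():
    body = web.get('http://example.com/')
    return find_title(content=body) or '[untitled]'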
def wa(bot, trigger):
    """Wolfram Alpha calculator"""
    if not trigger.group(2):
        return bot.reply("No search term.")
    query = trigger.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query.replace('+', '%2B')), 45)
    except timeout:
        return bot.say('[WOLFRAM ERROR] Request timed out')
    if answer:
        answer = answer.decode('unicode_escape')
        answer = HTMLParser.HTMLParser().unescape(answer)
        # This might not work if there is more than one instance of escaped
        # unicode chars, but so far I haven't seen any such output from
        # Wolfram Alpha
        match = re.search(r'\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\\:' + char_code, char)
        waOutputArray = answer.split(";")
        if len(waOutputArray) < 2:
            if answer.strip() == "Couldn't grab results from json stringified precioussss.":
                # Answer isn't given in an IRC-able format; just link to it.
                bot.say("[WOLFRAM] Couldn't display answer, try "
                        "http://www.wolframalpha.com/input/?i=" +
                        query.replace(' ', '+'))
            else:
                bot.say('[WOLFRAM ERROR]' + answer)
        else:
            bot.say('[WOLFRAM] ' + waOutputArray[0] + " = " + waOutputArray[1])
    else:
        bot.reply('Sorry, no result.')
def ytget(bot, trigger, uri):
    if not bot.config.has_section('google') or not bot.config.google.public_key:
        return None
    bytes = web.get(uri + '&key=' + bot.config.google.public_key)
    try:
        result = json.loads(bytes)
    except ValueError:
        return None
    result = result['items'][0]

    # reformat the ISO 8601 period (e.g. 'PT4M13S') into pieces like '4m 13s'
    splitdur = ISO8601_PERIOD_REGEX.match(result['contentDetails']['duration'])
    dur = []
    for k, v in splitdur.groupdict().iteritems():
        if v is not None:
            dur.append(v.lower())
    result['contentDetails']['duration'] = ' '.join(dur)

    pubdate = datetime.datetime.strptime(result['snippet']['publishedAt'],
                                         '%Y-%m-%dT%H:%M:%S.%fZ')
    result['snippet']['publishedAt'] = pubdate.strftime('%D %T')

    # add thousands separators to the view/comment/like counts
    for k in result['statistics']:
        result['statistics'][k] = '{:,}'.format(long(result['statistics'][k]))

    return result
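# ytget() above matches against a module-level ISO8601_PERIOD_REGEX that is
# not shown here. Below is a plausible stand-in (an assumption, not the
# module's exact pattern) whose named groups keep the unit letter, matching
# how ytget joins the lowercased values into e.g. '4m 13s'.
import re

ISO8601_PERIOD_REGEX = re.compile(
    r'P(?P<years>\d+Y)?(?P<months>\d+M)?(?P<days>\d+D)?'
    r'(?:T(?P<hours>\d+H)?(?P<minutes>\d+M)?(?P<seconds>\d+S)?)?'
)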
def etymology(bot, word):
    # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc
    # entries? - http://swhack.com/logs/2006-07-19#T15-05-29
    # `bot` is taken as a parameter because the configured language is needed
    # for the error messages; the original referenced an undefined `bot`
    if len(word) > 25:
        if bot.config.lang == 'ca':
            raise ValueError("Paraula massa llarga: %s[...]" % word[:10])
        elif bot.config.lang == 'es':
            raise ValueError("Palabra demasiado larga: %s[...]" % word[:10])
        else:
            raise ValueError("Word too long: %s[...]" % word[:10])
    word = {'axe': 'ax/axe'}.get(word, word)

    bytes = web.get(etyuri % word)
    definitions = r_definition.findall(bytes)
    if not definitions:
        return None

    defn = text(definitions[0])
    m = r_sentence.match(defn)
    if not m:
        return None
    sentence = m.group(0)

    maxlength = 275
    if len(sentence) > maxlength:
        sentence = sentence[:maxlength]
        words = sentence[:-5].split(' ')
        words.pop()
        sentence = ' '.join(words) + ' [...]'

    sentence = '"' + sentence.replace('"', "'") + '"'
    return sentence + ' - ' + (etyuri % word)
def duck_search(query):
    query = query.replace('!', '')
    uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query
    bytes = web.get(uri)
    m = r_duck.search(bytes)
    if m:
        return web.decode(m.group(1))
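# duck_search() depends on a module-level r_duck pattern that is not shown
# here. The regex below is a reconstruction of the kind of pattern used (an
# assumption): it pulls the first result href out of DuckDuckGo's HTML
# results page.
import re

r_duck = re.compile(r'nofollow" class="[^"]+" href="(.*?)">')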
def ytsearch(bot, trigger):
    """Allows users to search for YouTube videos with .yt <search query>"""
    if not trigger.group(2):
        return
    # Note that web.get() quotes the query parameters, so the
    # trigger is purposely left unquoted (double-quoting breaks things)
    url = SEARCH_URL.format(get_api_key(bot), trigger.group(2))
    result = json.loads(web.get(url))
    if 'error' in result:
        bot.say(u'[YouTube Search] ' + result['error']['message'])
        return
    if len(result['items']) == 0:
        bot.say(u'[YouTube Search] No results for ' + trigger.group(2))
        return
    # YouTube v3 API does not include useful video metadata in search results.
    # Searching gives us the video ID; now we have to do a regular lookup to
    # get the information we want.
    try:
        info = fetch_video_info(bot, result['items'][0]['id']['videoId'])
    except YouTubeError as e:
        bot.say(u'[YouTube] Lookup failed: {}'.format(e))
        return
    bot.say(format_info('YouTube Search', info, include_link=True))
def get_steam_info(url):
    # we get the soup manually because the steam pages have some odd encoding troubles
    headers = {'Cookie': 'lastagecheckage=1-January-1992; birthtime=694252801'}
    page = web.get(url, headers=headers)
    soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")

    name = soup.find('div', {'class': 'apphub_AppName'}).text
    desc = ": " + soup.find('div', {'class': 'game_description_snippet'}).text.strip()
    desc = (desc[:127] + '...') if len(desc) > 130 else desc
    # the page has a ton of returns and tabs
    rel_date = soup.find('span', {'class': 'date'}).text.strip()
    tags = soup.find('div', {'class': 'glance_tags'}).text.strip().replace(
        u'Free to Play', '').replace(u'+', '').split()
    genre = " - Genre: " + u', '.join(tags[:4])
    date = " - Release date: " + rel_date.replace(u"Release Date: ", u"")
    price = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()
    if "Free to Play" not in price:
        price = "Price: " + price
    price = " - " + price

    return u'[Steam] {}{}{}{}{}'.format(name, desc, genre, date, price)
def find_anime(bot, anime):
    url = 'http://hummingbird.me/api/v1/search/anime?query='
    raw = web.get(url + anime)
    try:
        data = json.loads(raw)
    except ValueError:
        return bot.say(u'[Hummingbird] No anime found matching \'' + anime + '\'')
    if len(data) < 1:
        return bot.say(u'[Hummingbird] No anime found matching \'' + anime + '\'')
    else:
        data = data[0]
    if 'error' in data:
        return bot.say(u'[Hummingbird] An error occurred (' + data['error'] + ')')
    output = (u'[Hummingbird] {title} | {show_type} | Rating: {rating} | '
              u'Episodes: {episode_count} | {age_rating} | {url}')
    if data['community_rating'] != 0:
        data['rating'] = str(int(round(data['community_rating'] * 20))) + '%'
    else:
        data['rating'] = '-'
    bot.say(output.format(**data))
def amazon_url(bot, trigger):
    item = html.fromstring(web.get(trigger.group(1)))
    # the bare excepts below were narrowed to IndexError, which is what an
    # empty xpath() result raises on [0]
    try:
        title = item.xpath("//span[@id='productTitle']/text()")[0]
    except IndexError:
        title = item.xpath("//span[@id='btAsinTitle']/text()")[0]
    try:
        price = item.xpath("//span[@id='priceblock_ourprice']/text()")[0]
    except IndexError:
        try:
            price = item.xpath("//span[@id='priceblock_saleprice']/text()")[0]
        except IndexError:
            try:
                price = item.xpath("//b[@class='priceLarge']/text()")[0]
            except IndexError:
                price = "$?"
    try:
        rating = item.xpath("//div[@id='avgRating']/span/text()")[0].strip()
    except IndexError:
        rating = item.xpath("//div[@class='gry txtnormal acrRating']/text()")[0].strip()
    try:
        breadcrumb = ' '.join(
            item.xpath("//li[@class='breadcrumb']")[0].text_content().split())
    except IndexError:
        breadcrumb = "Unknown"

    # render the average rating as filled and empty stars
    star_count = round(float(rating.split(' ')[0]), 0)
    stars = ''
    for x in xrange(0, int(star_count)):
        stars += u'\u2605'
    for y in xrange(int(star_count), 5):
        stars += u'\u2606'

    out = ['[Amazon]', title, '|', breadcrumb, '|', stars, '|', price]
    bot.say(' '.join(out))
def show_bug(willie, trigger):
    """Show information about a Bugzilla bug."""
    domain = trigger.group(1)
    if domain not in willie.config.bugzilla.get_list('domains'):
        return
    url = 'https://%s%sctype=xml&%s' % trigger.groups()
    data = web.get(url)
    bug = etree.fromstring(data).find('bug')
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | '
               'Importance: %s | Status: %s | Assigned to: %s | '
               'Reported: %s | Modified: %s')
    # also guard against an empty <resolution/> element, whose .text is None
    resolution = bug.find('resolution')
    if resolution is not None and resolution.text:
        status = bug.find('bug_status').text + ' ' + resolution.text
    else:
        status = bug.find('bug_status').text
    message = message % (
        bug.find('short_desc').text, bug.find('product').text,
        bug.find('component').text, bug.find('version').text,
        (bug.find('priority').text + ' ' + bug.find('bug_severity').text),
        status, bug.find('assigned_to').text, bug.find('creation_ts').text,
        bug.find('delta_ts').text)
    willie.say(message)
def checkSchedule(bot):
    print("Checking for new schedule...")
    bytes = web.get(handmadeScheduleURI)
    reader = csv.DictReader(bytes.split("\n"), ["date", "time", "description"])
    result = ""
    for row in reader:
        rowSplit = row["date"].split("-")
        date = arrow.now().replace(year=int(rowSplit[0]), month=int(rowSplit[1]),
                                   day=int(rowSplit[2]))
        streams = getStreamsOnDay(date)
        time, flag = dateParser.parseDT(row["time"], sourceTime=date.datetime)
        time = arrow.get(time, defaultTz)
        off_day = row["description"] == "off"
        # usually at most one element long (if it's not, we have some
        # refactoring to do elsewhere!)
        for stream in streams:
            if off_day:
                # kill the stream
                StreamEpisode.delete(getID(stream))
            if abs(stream.start - time) > timedelta(0):
                scheduleStream(time)
                result += "%s now at %s (was %s), " % (
                    time.strftime("%b %d %Y"), time.strftime("%I:%M%p"),
                    stream._get_start().strftime("%I:%M%p"))
        if len(streams) == 0 and not off_day:
            scheduleStream(time)
            result += time.strftime("%b %d %Y at %I:%M%p %Z") + ", "
    if len(result) > 0:
        result = result[:-2]
        for channel in bot.channels:
            # announcing is currently disabled
            # bot.msg(channel, "Updated the stream schedule: %s" % result)
            pass
def getAPI(willie, trigger):
    # contact the 'heavyweight' XML API
    try:
        raw = web.get(radioURL % 'stats')
    except Exception:
        willie.say('The radio is not responding to the stats request.')
        return 0

    # parse the XML
    XML = parseString(raw).documentElement
    status = XML.getElementsByTagName('STREAMSTATUS')[0].firstChild.nodeValue
    if status == '0':
        willie.say('The radio is currently offline.')
        return 0

    status = 'Online'
    servername = XML.getElementsByTagName('SERVERTITLE')[0].firstChild.nodeValue
    curlist = XML.getElementsByTagName('CURRENTLISTENERS')[0].firstChild.nodeValue
    maxlist = XML.getElementsByTagName('MAXLISTENERS')[0].firstChild.nodeValue

    # garbage disposal
    XML.unlink()

    # print results
    willie.say('[%s]Status: %s. Listeners: %s/%s.' %
               (servername, status, curlist, maxlist))
    return 1
def mw_snippet(server, query):
    """
    Retrieves a snippet of the specified length from the given page on the
    given server.
    """
    snippet_url = ('http://en.wikipedia.org/w/api.php?action=parse'
                   '&format=json'
                   '&redirects'
                   '&text={{automatic taxobox|taxon = %s}}' % query)
    snippet = json.loads(web.get(snippet_url))
    snippet = snippet['parse']['text']
    # the parse API keys the rendered text oddly, so grab the first key
    # in the result
    snippet = snippet[snippet.keys()[0]]
    return snippet
def etymology(word):
    # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc
    # entries? - http://swhack.com/logs/2006-07-19#T15-05-29
    if len(word) > 25:
        raise ValueError("Word too long: %s[...]" % word[:10])
    word = {'axe': 'ax/axe'}.get(word, word)

    bytes = web.get(etyuri % word)
    definitions = r_definition.findall(bytes)
    if not definitions:
        return None

    defn = text(definitions[0])
    m = r_sentence.match(defn)
    if not m:
        return None
    sentence = m.group(0)

    maxlength = 275
    if len(sentence) > maxlength:
        sentence = sentence[:maxlength]
        words = sentence[:-5].split(' ')
        words.pop()
        sentence = ' '.join(words) + ' [...]'

    sentence = '"' + sentence.replace('"', "'") + '"'
    return sentence + ' - ' + (etyuri % word)
def wa(bot, trigger):
    """Wolfram Alpha calculator"""
    if not trigger.group(2):
        return bot.reply("No search term.")
    query = trigger.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query.replace('+', '%2B')), 45)
    except timeout:
        return bot.say('[WOLFRAM ERROR] Request timed out')
    if answer:
        answer = answer.decode('string_escape')
        answer = HTMLParser.HTMLParser().unescape(answer)
        # This might not work if there is more than one instance of escaped
        # unicode chars, but so far I haven't seen any such output from
        # Wolfram Alpha
        match = re.search(r'\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\\:' + char_code, char)
        waOutputArray = answer.split(";")
        if len(waOutputArray) < 2:
            bot.say('[WOLFRAM ERROR]' + answer)
        else:
            bot.say('[WOLFRAM] ' + waOutputArray[0] + " = " + waOutputArray[1])
    else:
        bot.reply('Sorry, no result.')
def horrible(bot, trigger):
    """ .horrible [show] - View the latest HorribleSubs releases for any given show """
    latest = False
    query = trigger.group(2)
    if not query:
        latest = True
    url = 'http://horriblesubs.info/lib/search.php?value={}'
    if latest:
        url = 'http://horriblesubs.info/lib/latest.php'
    soup = BeautifulSoup(web.get(url.format(query)), 'lxml')
    ep = soup.find_all('div', {'class': 'episode'})
    if len(ep) > 0:
        for epi in ep:
            episode = ''.join(
                [x for x in epi.contents if isinstance(x, NavigableString)])
            resblock = epi.find_all('div', {'class': 'linkful resolution-block'})
            resolutions = []
            links = []
            for res in resblock:
                links.extend([link.find('a')['href']
                              for link in res.find_all('span', {'class': 'ind-link'})
                              if 'Torrent' in link.text])
                resolutions.append(res.find('a', {'href': '#'}).text)
            bot.say('Latest: {} | Resolutions: {} | Download: {} ({})'.format(
                episode, ', '.join(resolutions), links[-1], resolutions[-1]))
        return
    bot.say('[Horrible] No results found')
def show_bug(bot, trigger, match=None):
    """Show information about a Bugzilla bug."""
    match = match or trigger
    domain = match.group(1)
    if (not bot.config.has_section('bugzilla')
            or domain not in bot.config.bugzilla.get_list('domains')):
        return
    url = 'https://%s%sctype=xml&%s' % match.groups()
    data = web.get(url, dont_decode=True)
    bug = etree.fromstring(data).find('bug')
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | '
               'Importance: %s | Status: %s | Assigned to: %s | '
               'Reported: %s | Modified: %s')
    resolution = bug.find('resolution')
    if resolution is not None and resolution.text:
        status = bug.find('bug_status').text + ' ' + resolution.text
    else:
        status = bug.find('bug_status').text
    message = message % (
        bug.find('short_desc').text, bug.find('product').text,
        bug.find('component').text, bug.find('version').text,
        (bug.find('priority').text + ' ' + bug.find('bug_severity').text),
        status, bug.find('assigned_to').text, bug.find('creation_ts').text,
        bug.find('delta_ts').text)
    bot.say(message)
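# For reference, the fields the show_bug() variants read correspond to a
# Bugzilla ctype=xml payload shaped roughly like this (abridged, values
# illustrative):
SAMPLE_BUG_XML = """<bugzilla>
  <bug>
    <short_desc>Crash on startup</short_desc>
    <product>Core</product>
    <component>Widgets</component>
    <version>1.0</version>
    <priority>P1</priority>
    <bug_severity>critical</bug_severity>
    <bug_status>RESOLVED</bug_status>
    <resolution>FIXED</resolution>
    <assigned_to>dev@example.com</assigned_to>
    <creation_ts>2014-01-01 12:00:00</creation_ts>
    <delta_ts>2014-02-01 12:00:00</delta_ts>
  </bug>
</bugzilla>"""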
def show_bug(willie, trigger, match=None):
    """Show information about a Bugzilla bug."""
    match = match or trigger
    domain = match.group(1)
    if domain not in willie.config.bugzilla.get_list('domains'):
        return
    url = 'https://%s%sctype=xml&%s' % match.groups()
    data = web.get(url)
    bug = etree.fromstring(data).find('bug')
    message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | '
               'Importance: %s | Status: %s | Assigned to: %s | '
               'Reported: %s | Modified: %s')
    resolution = bug.find('resolution')
    if resolution is not None and resolution.text:
        status = bug.find('bug_status').text + ' ' + resolution.text
    else:
        status = bug.find('bug_status').text
    message = message % (
        bug.find('short_desc').text, bug.find('product').text,
        bug.find('component').text, bug.find('version').text,
        (bug.find('priority').text + ' ' + bug.find('bug_severity').text),
        status, bug.find('assigned_to').text, bug.find('creation_ts').text,
        bug.find('delta_ts').text)
    willie.say(message)
def status(bot, trigger):
    """ .status <server> - Grabs information about a minecraft server! """
    try:
        server = MinecraftServer.lookup(trigger.group(3).strip())
    except Exception:
        bot.say(u'[MCS] Unable to find a Minecraft server running at \'{}\''
                .format(trigger.group(3).strip()))
        # without this return, the status() call below would hit an
        # unbound `server`
        return
    try:
        status = server.status()
        desc = ' '.join(re.sub(u'\u00A7.', '', status.description).split())
        bot.say(u'[MCS] {0} | {1} players | {2} ms | {3}'.format(
            trigger.group(3).strip(), status.players.online,
            status.latency, desc))
    except Exception:
        try:
            raw = web.get('http://minespy.net/api/serverping/' +
                          str(server.host) + ':' + str(server.port))
            status = json.loads(raw)
            bot.say(u'[MCS] {0} | {1} players | {2} ms | {3}'.format(
                trigger.group(3).strip(), str(status['online']),
                str(status['latency']), str(status['strippedmotd'])))
        except Exception as e:
            bot.say(u'[MCS] Unable to fetch info from \'{}\' ({})'.format(
                trigger.group(3).strip(), e))
def wa(bot, trigger):
    """Wolfram Alpha calculator"""
    if not trigger.group(2):
        return bot.reply("No search term.")
    query = trigger.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query.replace('+', '%2B')), 45)
    except timeout:
        return bot.say('[WOLFRAM ERROR] Request timed out')
    if answer:
        answer = answer.decode('string_escape')
        answer = HTMLParser.HTMLParser().unescape(answer)
        # This might not work if there is more than one instance of escaped
        # unicode chars, but so far I haven't seen any such output from
        # Wolfram Alpha
        match = re.search(r'\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\\:' + char_code, char)
        waOutputArray = answer.split(";")
        if len(waOutputArray) < 2:
            if answer.strip() == "Couldn't grab results from json stringified precioussss.":
                # Answer isn't given in an IRC-able format; just link to it.
                bot.say("[WOLFRAM] Couldn't display answer, try "
                        "http://www.wolframalpha.com/input/?i=" +
                        query.replace(' ', '+'))
            else:
                bot.say('[WOLFRAM ERROR]' + answer)
        else:
            bot.say('[WOLFRAM] ' + waOutputArray[0] + " = " + waOutputArray[1])
    else:
        bot.reply('Sorry, no result.')
def osu_user(bot, trigger):
    """ .osu [user] - Show information on an osu! user """
    data = '?k=%s&u=%s' % (bot.config.osu.api_key, str(trigger.group(2)))
    raw = web.get('https://osu.ppy.sh/api/get_user' + data)
    response = json.loads(raw)
    # the two identical invalid-user branches were merged into one check
    if not response or not response[0]:
        bot.say('[' + color('osu!', u'13') + '] Invalid user')
        return
    user = response[0]
    level = 0
    accuracy = 0
    try:
        level = int(float(user['level']))
        accuracy = int(float(user['accuracy']))
    except (TypeError, ValueError):
        pass
    output = [
        '[', color('osu!', u'13'), '] ',
        str(user['username']),
        ' | Level ', str(level),
        ' | Rank ', str(user['pp_rank']),
        ' | Play Count ', str(user['playcount']),
        ' | Ranked Score ', str(user['ranked_score']),
        ' | Total Score ', str(user['total_score']),
        ' | Accuracy ~', str(accuracy), '%'
    ]
    bot.say(''.join(output))
def get_def(path, num=0):
    url = UD_URL + path
    try:
        resp = json.loads(web.get(url))
    except UnicodeError:
        return 'ENGLISH M**********R, DO YOU SPEAK IT?'
    nom = num + 1
    word = path[12:]  # strip the leading "define?term=" from the path
    if path.startswith("define?term=") and resp['result_type'] == 'no_results':
        definition = 'Definition %s not found!' % (word)
    else:
        try:
            item = resp['list'][num]['definition'].encode('utf8')
            thumbsup = resp['list'][num]['thumbs_up']
            thumbsdown = resp['list'][num]['thumbs_down']
            points = str(int(thumbsup) - int(thumbsdown))
            total_nom = len(resp['list'])
            definition = ""
            if path == "random":
                word = resp['list'][num]['word'].encode('utf8')
                definition = 'Word: ' + str(word) + ' | '
            definition = (definition + 'Definition: ' +
                          str(item)[1:].replace("\n", "") +
                          " | Number: " + str(nom) + '/' + str(total_nom) +
                          ' | Points: ' + points +
                          ' (03' + str(thumbsup) + '|05' + str(thumbsdown) + ')')
        except IndexError:
            definition = ('Definition entry %s does '
                          'not exist for \'%s\'.' % (nom, word))
    return definition
def wa(willie, trigger):
    """Wolfram Alpha calculator"""
    if not trigger.group(2):
        return willie.reply("No search term.")
    query = trigger.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query.replace('+', '%2B')), 45)
    except timeout:
        return willie.say('[WOLFRAM ERROR] Request timed out')
    if answer:
        answer = answer.decode('string_escape')
        answer = HTMLParser.HTMLParser().unescape(answer)
        # This might not work if there is more than one instance of escaped
        # unicode chars, but so far I haven't seen any such output from
        # Wolfram Alpha
        match = re.search(r'\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\\:' + char_code, char)
        waOutputArray = answer.split(";")
        if len(waOutputArray) < 2:
            willie.say('[WOLFRAM ERROR]' + answer)
        else:
            willie.say('[WOLFRAM] ' + waOutputArray[0] + " = " + waOutputArray[1])
    else:
        willie.reply('Sorry, no result.')
def find_title(url):
    """Return the title for the given URL."""
    try:
        content, headers = web.get(url, return_headers=True,
                                   limit_bytes=max_bytes)
    except UnicodeDecodeError:
        return  # Fail silently when data can't be decoded

    # Some cleanup that I don't really grok, but was in the original, so
    # we'll keep it (with the compiled regexes made global) for now.
    content = title_tag_data.sub(r'<\1title>', content)
    content = quoted_title.sub('', content)

    start = content.find('<title>')
    end = content.find('</title>')
    if start == -1 or end == -1:
        return
    title = web.decode(content[start + 7:end])
    title = title.strip()[:200]

    title = ' '.join(title.split())  # cleanly remove multiple spaces

    # More cryptic regex substitutions. This one looks to be myano's invention.
    title = re_dcc.sub('', title)

    return title or None
def api_bmark(bot, trigger, found_match=None, extra=None):
    url = found_match or trigger
    bytes = web.get(url)
    # XXX: needs a patch to the URL module
    title = find_title(content=bytes)
    if title is None:
        title = '[untitled]'
    data = {
        u'url': url,
        u'is_private': int(api_private),
        u'description': title.encode('utf-8'),
        u'content': bytes,
    }
    if extra is not None:
        # extract #tags, uniquely
        # copied from http://stackoverflow.com/a/6331688/1174784
        tags = {tag.strip("#") for tag in extra.split() if tag.startswith("#")}
        if tags:
            data['tags'] = ' '.join(tags)
        # strip tags from message and see what's left
        message = re.sub(r'#\w+', '', extra).strip()
        if message != '':
            # something more than hashtags was provided
            data['extended'] = extra
    return [title, get_hostname(url)] + list(api(bot, trigger, 'bmark', data))