def unmatched_url(match, chan, db):
    disabled_commands = database.get(db, 'channels', 'disabled', 'chan', chan)
    try:
        r = requests.get(match, headers=headers, allow_redirects=True, stream=True)
    except Exception as e:
        return formatting.output('URL', ['Error: {}'.format(e)])
    domain = urlparse(match).netloc
    if r.status_code != 404:
        content_type = r.headers['Content-Type']
        if 'html' in content_type:
            # Read at most ~48 KB of the body; that is enough to find
            # <title> without downloading large pages in full.
            data = ''
            for chunk in r.iter_content(chunk_size=1024):
                data += chunk
                if len(data) > 48336:
                    break
            body = html.fromstring(data)
            try:
                title = body.xpath('//title/text()')[0]
            except IndexError:
                return formatting.output('URL', ['No Title ({})'.format(domain)])
            try:
                title = text.fix_bad_unicode(title)
            except Exception:
                pass  # fall back to the raw title
            return formatting.output('URL', ['{} ({})'.format(title, domain)])
        else:
            if disabled_commands and 'filesize' in disabled_commands:
                return
            try:
                length = int(r.headers['Content-Length'])
                length = formatting.filesize(length) if length >= 0 else 'Unknown size'
            except (KeyError, ValueError):
                length = 'Unknown size'
            if '503 B' in length:
                length = ''
            return formatting.output('URL', ['{} Size: {} ({})'.format(content_type, length, domain)])
    return

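# The plugin above leans on two module-level helpers that are not shown
# here: a `headers` dict carrying a browser-ish User-Agent, and the bot's
# `database` wrapper. A minimal sqlite3-backed sketch of both follows;
# the names, table layout, and signatures are assumptions inferred from
# the call sites, not the bot's real implementation.
import sqlite3

headers = {'User-Agent': 'Mozilla/5.0 (compatible; examplebot)'}

def db_get(db, table, column, match_column, match_value):
    # Stand-in for database.get. Identifiers cannot be bound as SQL
    # parameters, so this sketch trusts callers for table/column names;
    # only the matched value is parametrized.
    query = 'SELECT {} FROM {} WHERE {} = ?'.format(column, table, match_column)
    row = db.execute(query, (match_value,)).fetchone()
    return row[0] if row else None

def db_set(db, table, column, value, match_column, match_value):
    # Stand-in for database.set; db is assumed to be a sqlite3 connection.
    query = 'UPDATE {} SET {} = ? WHERE {} = ?'.format(table, column, match_column)
    db.execute(query, (value, match_value))
    db.commit()
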
def beats(inp):
    """beats -- Gets the current time in .beats (Swatch Internet Time)."""
    if inp.lower() == "wut":
        return "Instead of hours and minutes, the mean solar day is divided " \
               "up into 1000 parts called \".beats\". Each .beat lasts 1 minute " \
               "and 26.4 seconds. Times are notated as a 3-digit number out of " \
               "1000 after midnight. So, @248 would indicate a time 248 .beats " \
               "after midnight, representing 248/1000 of a day, just over " \
               "5 hours and 57 minutes. There are no timezones."
    elif inp.lower() == "guide":
        return u"1 day = 1000 .beats, 1 hour = 41.666 .beats, 1 min = 0.6944 .beats, 1 second = 0.01157 .beats"

    t = time.gmtime()
    h, m, s = t.tm_hour, t.tm_min, t.tm_sec
    utc = 3600 * h + 60 * m + s
    bmt = utc + 3600  # Biel Mean Time (BMT) is UTC+1, with no DST
    beat = bmt / 86.4  # 86400 seconds per day / 1000 .beats
    if beat >= 1000:  # wrap past midnight BMT
        beat -= 1000
    return formatting.output('Swatch Internet Time', ['@{0:.2f}'.format(beat)])

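# A worked check of the arithmetic above (an illustration, not part of
# the plugin): at 12:00:00 UTC, utc = 43200 s, bmt = 46800 s, and
# 46800 / 86.4 ~= 541.67, i.e. "@541.67".
def _beats_at(h, m, s):
    utc = 3600 * h + 60 * m + s
    return ((utc + 3600) / 86.4) % 1000  # BMT is UTC+1

assert abs(_beats_at(12, 0, 0) - 541.67) < 0.01
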
def snopes(inp):
    """snopes <topic> -- Searches snopes for an urban legend about <topic>."""
    search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
    result_urls = search_page.xpath("//a[@target='_self']/@href")
    if not result_urls:
        return "no matching pages found"

    snopes_page = http.get_html(result_urls[0])
    snopes_text = snopes_page.text_content()

    claim = re.search(r"Claim: .*", snopes_text).group(0).strip()
    status = re.search(r"Status: .*", snopes_text)
    if status is not None:
        status = status.group(0).strip()
    else:
        # new-style statuses
        status = "Status: %s." % re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED",
                                           snopes_text).group(0).title()

    claim = re.sub(r"[\s\xa0]+", " ", claim)  # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", status)
    return formatting.output('Snopes', ['{} {} {}'.format(claim, status, result_urls[0])])

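# `http.get_html` is one of the bot's helpers. A minimal sketch of what
# the calls above assume it does (fetch a URL, pass keyword arguments
# through as query parameters, and parse the response with lxml); the
# real helper may differ:
import requests
from lxml import html as lxml_html

def get_html(url, **params):
    response = requests.get(url, params=params)
    return lxml_html.fromstring(response.content)
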
def timefunction(inp, nick="", reply=None, db=None, notice=None): "time [location] [dontsave] | [@ nick] -- Gets time for <location>." save = True if '@' in inp: nick = inp.split('@')[1].strip() location = database.get(db,'users','location','nick',nick) if not location: return "No location stored for {}.".format(nick.encode('ascii', 'ignore')) else: location = database.get(db,'users','location','nick',nick) if not inp: if not location: notice(time.__doc__) return else: # if not location: save = True if " dontsave" in inp: save = False location = inp.split()[0] # now, to get the actual time try: url = "https://www.google.com/search?q=time+in+%s" % location.replace(' ','+').replace(' save','') html = http.get_html(url) prefix = html.xpath("//div[contains(@class,'vk_c vk_gy')]//span[@class='vk_gy vk_sh']/text()")[0].strip() curtime = html.xpath("//div[contains(@class,'vk_c vk_gy')]//div[@class='vk_bk vk_ans']/text()")[0].strip() day = html.xpath("//div[contains(@class,'vk_c vk_gy')]//div[@class='vk_gy vk_sh']/text()")[0].strip() date = html.xpath("//div[contains(@class,'vk_c vk_gy')]//div[@class='vk_gy vk_sh']/span/text()")[0].strip() except IndexError: return "Could not get time for that location." if location and save: database.set(db,'users','location',location,'nick',nick) return formatting.output('Time', [u'{} is \x02{}\x02 [{} {}]'.format(prefix, curtime, day, date)])
def timefunction2(inp, nick="", reply=None, db=None, notice=None): "time [location] [dontsave] | [@ nick] -- Gets time for <location>." save = True if '@' in inp: nick = inp.split('@')[1].strip() location = database.get(db,'users','location','nick',nick) if not location: return "No location stored for {}.".format(nick.encode('ascii', 'ignore')) else: location = database.get(db,'users','location','nick',nick) if not inp: if not location: notice(time.__doc__) return else: # if not location: save = True if " dontsave" in inp: save = False location = inp # now, to get the actual time url = "https://time.is/%s" % location.replace(' ','_').replace(' save','') try: request = urllib2.Request(url, None, headers) page = urllib2.urlopen(request).read() soup = BeautifulSoup(page, 'lxml') soup = soup.find('div', attrs={'id': re.compile('time_section')}) time = filter(None, http.strip_html(soup.find('div', attrs={'id': re.compile('twd')}).renderContents().strip())) details = filter(None, http.strip_html(soup.find('div', attrs={'id': re.compile('dd')}).renderContents().strip())) prefix = filter(None, http.strip_html(soup.find('div', attrs={'id': re.compile('msgdiv')}).renderContents().strip())) except IndexError: return "Could not get time for that location." return formatting.output('Time', [u'{} {}, {}'.format(prefix.decode('ascii', 'ignore'), time, details)])
def geoip(inp):
    "geoip <host/ip> -- Gets the location of <host/ip>"
    try:
        record = geo.record_by_name(inp)
    except Exception:
        return "Sorry, I can't locate that in my database."

    data = {}
    if "region_name" in record:
        # The region DB is missing a few areas, so fall back to an empty
        # string rather than crashing on the lookup.
        try:
            data["region"] = ", " + regions[record["country_code"]][record["region_name"]]
        except KeyError:
            data["region"] = ""
    else:
        data["region"] = ""

    data["cc"] = record["country_code"] or "N/A"
    data["country"] = record["country_name"] or "Unknown"
    data["city"] = record["city"] or "Unknown"
    return formatting.output('GeoIP', [
        '\x02Country:\x02 {country} ({cc}) \x02City:\x02 {city}{region}'.format(**data)
    ])

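# `geo` and `regions` are module-level assumptions: a pygeoip city reader
# and a {country_code: {region_code: region_name}} mapping. A sketch of
# the reader (the database path is an assumption):
import pygeoip
geo = pygeoip.GeoIP('GeoLiteCity.dat')
# geo.record_by_name('example.com') -> dict with 'country_code',
# 'country_name', 'city', 'region_name', ...
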
def pomfadd(inp, chan=None, nick=None, notice=None, db=None, say=None):
    "pomfadd <word> <url> -- Downloads file, uploads it and adds it to the dictionary"
    dfile, url = inp.split(None, 1)
    pomfurl = upload(url)
    strsave = "{} {}".format(dfile, pomfurl)
    datafiles.add(strsave, notice)
    return formatting.output('pomf', ['{} remembered as {}'.format(pomfurl, dfile)])

def pomfremember(inp, chan=None, nick=None, say=None, db=None, adminonly=True):
    "pomfremember <word> <url> -- Downloads file, uploads it and adds it to the dictionary"
    word, url = inp.split(None, 1)
    pomfurl = upload(url)
    strsave = "{} {}".format(word, pomfurl)
    hashtags.remember(strsave, nick, db)
    return formatting.output('pomf', ['{} remembered as {}'.format(word, pomfurl)])

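# `upload` (used by the pomf commands) is assumed to mirror a remote file
# to a pomf-style host and return the rehosted URL. A sketch against the
# classic pomf upload API; the endpoint is a placeholder and the response
# shape an assumption:
import requests

def upload(url):
    blob = requests.get(url, timeout=30)
    resp = requests.post('https://pomf.example/upload.php',
                         files={'files[]': ('file', blob.content)})
    return resp.json()['files'][0]['url']
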
def timefunction(inp, nick="", reply=None, db=None, notice=None): "time [location] [dontsave] | [@ nick] -- Gets time for <location>." save = True if '@' in inp: nick = inp.split('@')[1].strip() location = database.get(db, 'users', 'location', 'nick', nick) if not location: return "No location stored for {}.".format( nick.encode('ascii', 'ignore')) else: location = database.get(db, 'users', 'location', 'nick', nick) if not inp: if not location: notice(time.__doc__) return else: # if not location: save = True if " dontsave" in inp: save = False location = inp.split()[0] # now, to get the actual time try: url = "https://www.google.com/search?q=time+in+{}".format( location.replace(' ', '+').replace(' save', '')) request = urllib2.Request(url, None, headers) page = urllib2.urlopen(request).read() soup = BeautifulSoup(page, 'lxml') soup = soup.find('div', attrs={'id': re.compile('ires')}) time = filter( None, http.strip_html( soup.find('div', attrs={ 'class': re.compile('vk_gy') }).renderContents().strip()).split(' ')) prefix = ' '.join(time[6:]) curtime = time[0] day = time[1] date = ' '.join(time[2:4]) except IndexError: return "Could not get time for that location." if location and save: database.set(db, 'users', 'location', location, 'nick', nick) return formatting.output( 'Time', [u'{} is \x02{}\x02 [{} {}]'.format(prefix, curtime, day, date)])
def timefunction(inp, nick="", reply=None, db=None, notice=None): "time [location] [dontsave] | [@ nick] -- Gets time for <location>." save = True if '@' in inp: nick = inp.split('@')[1].strip() location = database.get(db, 'users', 'location', 'nick', nick) if not location: return "No location stored for {}.".format( nick.encode('ascii', 'ignore')) else: location = database.get(db, 'users', 'location', 'nick', nick) if not inp: if not location: notice(time.__doc__) return else: # if not location: save = True if " dontsave" in inp: save = False location = inp.split()[0] # now, to get the actual time try: url = "https://www.google.com/search?q=time+in+%s" % location.replace( ' ', '+').replace(' save', '') html = http.get_html(url) prefix = html.xpath( "//div[contains(@class,'vk_c vk_gy')]//span[@class='vk_gy vk_sh']/text()" )[0].strip() curtime = html.xpath( "//div[contains(@class,'vk_c vk_gy')]//div[@class='vk_bk vk_ans']/text()" )[0].strip() day = html.xpath( "//div[contains(@class,'vk_c vk_gy')]//div[@class='vk_gy vk_sh']/text()" )[0].strip() date = html.xpath( "//div[contains(@class,'vk_c vk_gy')]//div[@class='vk_gy vk_sh']/span/text()" )[0].strip() except IndexError: return "Could not get time for that location." if location and save: database.set(db, 'users', 'location', location, 'nick', nick) return formatting.output( 'Time', [u'{} is \x02{}\x02 [{} {}]'.format(prefix, curtime, day, date)])
def spartist(inp, bot=None):
    """spartist <artist> -- Search Spotify for <artist>"""
    # Get access token
    try:
        access_token = get_access_token(
            bot.config['api_keys']['spotify_client_id'],
            bot.config['api_keys']['spotify_client_secret'])
    except Exception as e:
        return "Could not get Spotify access token: {}".format(e)

    # Query artist
    try:
        headers = {'Authorization': 'Bearer ' + access_token}
        params = {'q': inp, 'type': 'artist', 'limit': 1}
        data = requests.get('https://api.spotify.com/v1/search',
                            headers=headers, params=params)
        data = data.json()
    except Exception as e:
        return "Could not get artist information: {}".format(e)

    # Parse the first hit and build the reply
    try:
        first_result = data["artists"]["items"][0]
        artist = first_result["name"].encode("utf-8")
        genres = ', '.join(first_result["genres"]).encode("utf-8")
        url = first_result["external_urls"]["spotify"]
        uri = first_result["uri"]
        # Spotify has genre tags for many artists but not all
        if genres:
            artist_query_output = "\x02{}\x02, \x02Genres\x02: {} - {} ({})".format(
                artist, genres, url, uri)
        else:
            artist_query_output = "\x02{}\x02 - {} ({})".format(artist, url, uri)
    except IndexError:
        return "Could not find artist."
    return formatting.output('Spotify', [artist_query_output])

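# `get_access_token` is not shown in these snippets. Spotify's
# client-credentials flow is the natural fit for these read-only lookups;
# a sketch (the token endpoint is Spotify's documented URL, the helper
# name is the plugin's, the rest is an assumed implementation):
import requests

def get_access_token(client_id, client_secret):
    resp = requests.post('https://accounts.spotify.com/api/token',
                         data={'grant_type': 'client_credentials'},
                         auth=(client_id, client_secret))
    resp.raise_for_status()
    return resp.json()['access_token']
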
def spalbum(inp, bot=None):
    """spalbum <album> -- Search Spotify for <album>"""
    # Get access token
    try:
        access_token = get_access_token(
            bot.config['api_keys']['spotify_client_id'],
            bot.config['api_keys']['spotify_client_secret'])
    except Exception as e:
        return "Could not get Spotify access token: {}".format(e)

    # Query album
    try:
        headers = {'Authorization': 'Bearer ' + access_token}
        params = {'q': inp, 'type': 'album', 'limit': 1}
        data = requests.get('https://api.spotify.com/v1/search',
                            headers=headers, params=params)
        data = data.json()
    except Exception as e:
        return "Could not get album information: {}".format(e)

    # Parse the first hit and build the reply
    try:
        first_result = data["albums"]["items"][0]
        artists = [a["name"] for a in first_result["artists"]]
        artist = ', '.join(artists).encode("utf-8")
        album = first_result["name"].encode("utf-8")
        url = first_result["external_urls"]["spotify"]
        uri = first_result["uri"]
        album_query_output = "\x02{}\x02 - \x02{}\x02 - {} ({})".format(
            artist, album, url, uri)
    except IndexError:
        return "Could not find album."
    return formatting.output('Spotify', [album_query_output])

def timefunction(inp, nick="", reply=None, db=None, notice=None): "time [location] [dontsave] | [@ nick] -- Gets time for <location>." save = True if '@' in inp: nick = inp.split('@')[1].strip() location = database.get(db,'users','location','nick',nick) if not location: return "No location stored for {}.".format(nick.encode('ascii', 'ignore')) else: location = database.get(db,'users','location','nick',nick) if not inp: if not location: notice(time.__doc__) return else: # if not location: save = True if " dontsave" in inp: save = False location = inp.split()[0] # now, to get the actual time try: url = "https://www.google.com/search?q=time+in+{}".format(location.replace(' ','+').replace(' save','')) request = urllib2.Request(url, None, headers) page = urllib2.urlopen(request).read() soup = BeautifulSoup(page, 'lxml') soup = soup.find('div', attrs={'id': re.compile('ires')}) time = filter(None, http.strip_html(soup.find('div', attrs={'class': re.compile('vk_gy')}).renderContents().strip()).split(' ')) prefix = ' '.join(time[6:]) curtime = time[0] day = time[1] date = ' '.join(time[2:4]) except IndexError: return "Could not get time for that location." if location and save: database.set(db,'users','location',location,'nick',nick) return formatting.output('Time', [u'{} is \x02{}\x02 [{} {}]'.format(prefix, curtime, day, date)])
def wolframalpha(inp, bot=None):
    """wa <query> -- Computes <query> using Wolfram Alpha."""
    server = 'http://api.wolframalpha.com/v2/query.jsp'
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return formatting.output('WolframAlpha', ['error: missing api key'])

    waeo = WolframAlphaEngine(api_key, server)
    waeo.ScanTimeout = '3.0'
    waeo.PodTimeout = '4.0'
    waeo.FormatTimeout = '8.0'
    waeo.Async = 'True'

    query = waeo.CreateQuery(http.quote_plus(inp))
    result = waeo.PerformQuery(query)
    waeqr = WolframAlphaQueryResult(result)

    # Flatten every pod's subpods down to their plaintext representations.
    results = []
    for pod in waeqr.Pods():
        waep = Pod(pod)
        for subpod in waep.Subpods():
            waesp = Subpod(subpod)
            results.append(waesp.Plaintext())

    try:
        # Pod 0 is the input interpretation; pod 1 is normally the result.
        waquery = re.sub(r' (?:\||) +', ' ',
                         ' '.join(results[0][0].splitlines())).strip().replace(u'\xc2 ', '')
        if results[1][0] == [] or u'irreducible' in results[1][0]:
            # Skip empty or "irreducible" pods and use the next one instead.
            waresult = ' '.join(results[2][0].splitlines()).replace(u'\xc2 ', '')
        else:
            waresult = ' '.join(results[1][0].splitlines()).replace(u'\xc2 ', '')
        return formatting.output('WolframAlpha',
                                 [waquery.encode('utf-8'), waresult.encode('utf-8')])
    except Exception:
        return formatting.output('WolframAlpha', [random.choice(errors)])

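# `errors` is assumed to be a module-level list of canned failure lines
# picked at random when Wolfram|Alpha returns nothing parseable, e.g.
# (contents are an assumption):
errors = ["I don't know.", 'No results found.', 'Try rephrasing that.']
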
def pomf(url):
    "pomf <url> -- Downloads file and uploads it"
    return formatting.output('pomf', [upload(url)])

def spotify_url(match, bot=None):
    """Match Spotify urls and provide a blurb for the track/album/artist."""
    # The regex supplies the link type and the Spotify ID.
    link_type = match.group(2)
    spotify_id = match.group(3)
    url = spuri.format(link_type, spotify_id)

    # Get access token
    try:
        access_token = get_access_token(
            bot.config['api_keys']['spotify_client_id'],
            bot.config['api_keys']['spotify_client_secret'])
    except Exception as e:
        return "Could not get Spotify access token: {}".format(e)

    # Set appropriate headers
    headers = {'Authorization': 'Bearer ' + access_token}

    # Parse track link and retrieve data for blurb
    if link_type == "track":
        try:
            data = requests.get(
                'https://api.spotify.com/v1/tracks/{}'.format(spotify_id),
                headers=headers)
            data = data.json()
        except Exception as e:
            return "Could not get track information: {}".format(e)
        try:
            artists = [a["name"] for a in data["artists"]]
            artist = ', '.join(artists).encode("utf-8")
            track = data["name"].encode("utf-8")
            album = data["album"]["name"].encode("utf-8")
            song_query_output = "\"{}\" by \x02{}\x02 from the album \x02{}\x02".format(
                track, artist, album)
        except (KeyError, IndexError):
            return "Could not find track."
        return formatting.output('Spotify', [song_query_output])

    # Parse album link and retrieve data for blurb
    if link_type == "album":
        try:
            data = requests.get(
                'https://api.spotify.com/v1/albums/{}'.format(spotify_id),
                headers=headers)
            data = data.json()
        except Exception as e:
            return "Could not get album information: {}".format(e)
        try:
            artists = [a["name"] for a in data["artists"]]
            artist = ', '.join(artists).encode("utf-8")
            album = data["name"].encode("utf-8")
            album_query_output = "\x02{}\x02 - \x02{}\x02".format(artist, album)
        except (KeyError, IndexError):
            return "Could not find album."
        return formatting.output('Spotify', [album_query_output])

    # Parse artist link and retrieve data for blurb
    if link_type == "artist":
        try:
            data = requests.get(
                'https://api.spotify.com/v1/artists/{}'.format(spotify_id),
                headers=headers)
            data = data.json()
        except Exception as e:
            return "Could not get artist information: {}".format(e)
        try:
            artist = data["name"].encode("utf-8")
            genres = ', '.join(data["genres"]).encode("utf-8")
            # Spotify has genre tags for many artists but not all
            if genres:
                artist_query_output = "\x02{}\x02, \x02Genres\x02: {}".format(artist, genres)
            else:
                artist_query_output = "\x02{}\x02".format(artist)
        except (KeyError, IndexError):
            return "Could not find artist."
        return formatting.output('Spotify', [artist_query_output])

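# The hook's regex and the `spuri` template are not shown in these
# snippets; a sketch of shapes consistent with the group numbers used
# above (group 2 = type, group 3 = id). The actual patterns may differ:
import re

spotify_re = re.compile(r'(open\.spotify\.com/|spotify:)'
                        r'(track|album|artist)[/:]([a-zA-Z0-9]+)')
spuri = u'spotify:{}:{}'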