def man(inp, say=''):
    """.man <command> [section] - Returns man page for specified command;
    section defaults to 1 if not specified."""
    raw = inp.split()
    command = raw[0]
    if len(raw) == 2 and raw[1].isdigit():
        page = raw[1]
    else:
        page = "1"
    requested = page

    try:
        manpage = str(http.get_html(base_url, topic=command, section=page))
        # If no man page in the requested section, retry across all sections
        if re.match(r'.+(\>No matches for ").+', manpage):
            page = "all"
            manpage = str(http.get_html(base_url, topic=command, section=page))

        # If a man page exists for the command
        if not re.match(r'.+(\>No matches for ").+', manpage):
            if page != "all":
                say("{} - {}({})".format(
                    web.try_googl(base_url.format(command, page)), command, page))
            else:
                say("{} - {}({}) (No section {})".format(
                    web.try_googl(base_url.format(command, page)),
                    command, page, requested))
        else:
            system_manpage = get_system_manpage(command)
            if system_manpage:
                haste_url = web.haste(system_manpage, ext='txt')
                googl_url = web.try_googl(haste_url)
                say("{} - {}".format(googl_url, command))
            else:
                return "There is no man page for {}.".format(command)
    except Exception as e:  # (http.HTTPError, http.URLError) as e:
        print(">>> u'HTTP Error: {}'".format(e))
        return "HTTP Error, please try again in a few minutes."
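# Nearly every plugin in this section calls web.try_googl(). The helper itself
# is not shown; below is a minimal sketch of what it presumably does, assuming
# a goo.gl-style shortener that falls back to the original URL on any failure.
# This is a hypothetical implementation, not the source's (and the goo.gl
# service itself has since been retired):
import requests

def try_googl(url, api_key=None):
    """Shorten a URL via the goo.gl API; return the original URL on failure."""
    try:
        r = requests.post(
            "https://www.googleapis.com/urlshortener/v1/url",
            params={"key": api_key},       # assumed key plumbing
            json={"longUrl": url},
            timeout=5)
        r.raise_for_status()
        return r.json()["id"]              # "id" holds the short URL
    except Exception:
        return url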
def readtitle(match, say=None, nick=None):
    parsed_url = match.group().split(' ')[0]
    if any(word in parsed_url for word in skipurls):
        return
    try:
        request_url = http.get_html(parsed_url)
    except http.HTTPError as e:
        errors = {400: 'bad request (ratelimited?) 400',
                  401: 'unauthorized 401',
                  403: 'forbidden 403',
                  404: 'invalid user/id 404',
                  410: 'something something 410',
                  500: 'something is broken 500',
                  502: 'something is down ("getting upgraded?") 502',
                  503: 'something is overloaded 503'}
        if e.code == 404:
            return 'bad url?'
        if e.code in errors:
            return 'error: ' + errors[e.code]
        return 'error: unknown %s' % e.code
    try:
        titleget = request_url.xpath('//title/text()')[0]
        titleuni = " - " + unicode(titleget.strip())
    except IndexError:
        titleuni = ""
    shorturl = web.try_googl(parsed_url)
    say(shorturl + titleuni)
def imdb(inp, api_key=None):
    """imdb <movie> [year] - Gets information about a movie from IMDb."""
    year = ""
    if inp.split()[-1].isdigit():
        inp, year = ' '.join(inp.split()[:-1]), inp.split()[-1]
    try:
        content = http.get_json("http://www.omdbapi.com/", apikey=api_key,
                                t=inp, y=year, plot='short', r='json')
    except:
        return "API timeout, please try again in a few seconds."
    if content['Response'] == 'False':
        return content['Error']
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
        out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
        if content['Runtime'] != 'N/A':
            out += ' \x02%(Runtime)s\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02 votes. '
        out += web.try_googl('%(URL)s' % content)
        return out % content
    else:
        return 'Unknown error.'
def gis(inp, api_key=None):
    """.gis <term> - Finds an image using Google images (safesearch off)."""
    parsed = api_get(inp, api_key, is_image=True, num=10)
    if 'items' not in parsed:
        return 'no images found'
    return web.try_googl(random.choice(parsed['items'])['link'])
def get_info(url):
    if not url.startswith('//') and '://' not in url:
        url = 'http://' + url
    try:
        mimetype, encoding = mimetypes.guess_type(url)
        if mimetype and any(mimetype.startswith(t)
                            for t in ['video', 'audio', 'image']):
            return web.try_googl(url), None
        title = http.get_title(url)
        title = u' '.join(re.sub(u'\r|\n', u' ', title).split()).strip('| ')
        return web.try_googl(url), title or None
    except Exception:
        return web.try_googl(url), None
def rfact(inp, say=False, nick=False):
    """.rfact - Gets a random fact from OMGFACTS."""
    attempts = 0
    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue
        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        fact = ''.join(response.find(text=True))
        if fact:
            fact = fact.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue
    url = web.try_googl(link)
    return "{} - {}".format(fact.encode('ascii', 'ignore'), url)
def first(inp, chan='', bot=None, db=None, say=None):
    """f[irst] [-G] <phrase> - Finds the first occurrence of a phrase.
    Flag -G to search all channels."""
    inp, _global = is_global(inp)
    if not inp:
        return "Check your input and try again."
    if _global:
        match_clause = tokenize.build_query(inp)
    else:
        match_clause = '{} AND chan:"{}"'.format(
            tokenize.build_query(inp), chan.strip('#'))
    try:
        row = db.execute("select uts, time, chan, nick, action, msg from logfts "
                         "where logfts match ? and (msg not like '!%' and "
                         "msg not like ';%' and msg not like '.%') and "
                         "action = 'PRIVMSG' limit 1",
                         (match_clause,)).fetchone()
    except OperationalError:
        return "Error: must contain one inclusive match clause (+/=)."
    if row:
        row = dict(zip(['uts', 'time', 'chan', 'nick', 'action', 'msg'], row))
        row['date'] = row['time'].split(' ')[0]
        row['timesince'] = timesince.timesince(float(row['uts']))
        if bot.config.get("logviewer_url"):
            row['log_url'] = web.try_googl(bot.config["logviewer_url"].format(
                row['chan'].strip('#'), *row['time'].split()))
        else:
            row['log_url'] = ''
        say(formats[row['action']].format(context='first said that', **row).strip())
    else:
        say("Never!")
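# `first` above and `seen` below both call an is_global() helper that is not
# shown in this section. A minimal sketch of what it presumably does, assuming
# -G is a leading token on the input (hypothetical implementation):
def is_global(inp):
    """Strip a leading -G flag; return (remaining input, global?)."""
    parts = (inp or '').split()
    if parts and parts[0] == '-G':
        return ' '.join(parts[1:]), True
    return inp, False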
def outputsms(api_key, conn, bot, db, chan=None):
    global twilio_cache
    block = bot.config["sms"]["private"]
    messages = []
    _messages = http.get_json(bot.config["messages_url"].format(
        api_key['account_sid'], api_key['number']))
    for message in _messages:
        if message['MessageSid'] in twilio_cache:
            continue
        sender = message['From'][-10:]  # Force number to fit our model
        sender_nick = get_name(db, sender)
        if sender_nick and all(x not in block for x in [sender, sender_nick.lower()]) \
                and message['MessageSid'] not in [m['MessageSid'] for m in messages]:
            if message['MediaUrl']:
                media_uri = bot.config["gallery_url"].format(message['MessageSid'])
                message['out'] = u"<{}> [MMS] {} - {}".format(
                    sender_nick, web.try_googl(media_uri),
                    message['Body'] or "Presented without comment.")
            else:
                message['out'] = u"<{}> {}".format(sender_nick, message['Body'])
            messages.append(message)
    for message in messages:
        conn.send(u"PRIVMSG {} :{}".format(
            chan or bot.config['sms']['output_channel'], message['out']))
        log_and_cache(message)  # Mark all as read
        time.sleep(1)
    return len(messages)
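# Several plugins here read keys out of bot.config. A hypothetical config
# fragment showing the keys used by outputsms above and by first/seen/
# get_log_link (shape and values are illustrative, not from the source):
#
# {
#     "sms": {"private": ["5551234567"], "output_channel": "#channel"},
#     "messages_url": "https://example.invalid/{}/{}/messages.json",
#     "gallery_url": "https://example.invalid/gallery/{}",
#     "logviewer_url": "https://example.invalid/logs/{}/{}#{}"
# }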
def steam_url(match, say=None):
    app_id = match.group(1)
    try:
        if app_id:
            say(format_data(app_id))
        else:
            raise Exception
    except:
        say("{} - Steam".format(web.try_googl(match.group(0))))
def steam(inp):
    """.steam [search] - Search for specified game/trailer/DLC."""
    soup = http.get_soup("http://store.steampowered.com/search/?term={}".format(inp))
    result = soup.find('a', {'class': 'search_result_row'})
    try:
        return get_steam_info(result['href']) + " - " + web.try_googl(result['href'])
    except Exception as e:
        print "Steam search error: {}".format(e)
        return "Steam API error, please try again later."
def googleimage(inp, say=None, api_key=None):
    """gis <query> - Returns an image from Google Image results for <query>."""
    try:
        parsed = custom_get(inp, api_key, is_image=True, num=1)
    except Exception as e:
        return "Error: {}".format(e)
    if 'items' not in parsed:
        return "No results"
    say(web.try_googl(parsed['items'][0]['link']))
def twitter_url(match, say=None, api_key=None):
    try:
        request_url = 'https://api.twitter.com/1.1/statuses/show.json'
        params = {'id': match.group(2), 'tweet_mode': 'extended'}
        tweet = http.get_json(request_url, query_params=params, oauth=True,
                              oauth_keys=api_key)
        tweet['full_text'] = http.h.unescape(tweet['full_text'])
        lines = [t.strip() for t in tweet['full_text'].split('\n') if t.strip()]
        if 1 < len(lines) < 5:
            tweet['full_text'] = re.sub(r'(.*?)(https:\/\/t.co\/.*)',
                                        r'\1\n\2', tweet['full_text'])
            say(u'{} - {} (@{}) on Twitter:'.format(
                web.try_googl(match.group(0)),
                tweet['user']['name'].encode('ascii', 'ignore'),
                tweet['user']['screen_name']))
            for line in [t.strip() for t in tweet['full_text'].split('\n') if t.strip()]:
                say(u' {}'.format(line))
        else:
            say(u'{} - {} (@{}) on Twitter: "{}"'.format(
                web.try_googl(match.group(0)),
                tweet['user']['name'].encode('ascii', 'ignore'),
                tweet['user']['screen_name'],
                ' | '.join(lines)))
    except:
        say("{} - Twitter".format(web.try_googl(match.group(0))))
def answer(inp):
    """.answer <query> - Find the answer to a question on Yahoo! Answers."""
    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": inp.strip()}).one()
    short_url = web.try_googl(result["Link"])
    # we split the answer and .join() it to remove newlines/extra spaces
    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
def google(inp, say=None, api_key=None):
    """g[oogle] <query> - Returns first Google search result for <query>."""
    try:
        parsed = custom_get(inp, api_key)
    except Exception as e:
        return "Error: {}".format(e)
    if 'items' not in parsed:
        return "No results"
    link = web.try_googl(parsed['items'][0]['link'])
    title = text.truncate_str(parsed['items'][0]['title'], 250)
    title = u' '.join(re.sub(u'\r|\n', u' ', title).split()).strip('| ')
    say(u"{} - \x02{}\x02".format(link, title))
def get_youtube_info(video_id, api_key, timestamp=None):
    params = {
        "id": video_id,
        "key": api_key['access'],
        "part": "snippet,contentDetails,statistics"
    }
    result = http.get_json(video_url, query_params=params)
    url = short_url + video_id + (timestamp or '')
    if result.get('error') or not result.get('items'):
        return web.googl(url) + " - \x02Youtube\x0f"
    playtime = result['items'][0]['contentDetails']['duration'].strip('PT').lower()
    views = int(result['items'][0]['statistics'].get('viewCount', 0))
    return output_format.format(url=web.try_googl(url), time=playtime,
                                views=views, **result['items'][0]['snippet'])
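# get_youtube_info references module-level video_url, short_url, and
# output_format constants that are not shown in this section. Plausible
# definitions, inferred from how the function uses them; the output_format
# string in particular is an assumption, not the source's exact template:
video_url = "https://www.googleapis.com/youtube/v3/videos"
short_url = "https://youtu.be/"
output_format = u"{url} - \x02{title}\x02 - length \x02{time}\x02 - \x02{views:,}\x02 views"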
def format_data(app_id, show_url=True):
    """
    Takes a Steam app id and returns a formatted string with info.
    :param app_id: string
    :return: string
    """
    try:
        data = http.get_json(API_URL, appids=app_id)
    except Exception as e:
        return "Could not get game info: {}".format(e)

    game = data[app_id]["data"]
    out = []

    # basic info
    out.append(u"\x02{}\x02".format(game["name"]))
    desc = text.strip_html(game["about_the_game"])
    # out.append(text.truncate_str(desc, 70))

    # genres
    out.append(u", ".join([g['description'] for g in game["genres"]]))

    # pricing
    if game['is_free']:
        out.append(u"\x02free\x02")
    elif game.get('price_overview', False):
        price = game['price_overview']
        if price['final'] == price['initial']:
            out.append(u"\x02$%d.%02d\x02" % divmod(price['final'], 100))
        else:
            price_now = u"$%d.%02d" % divmod(price['final'], 100)
            price_original = u"$%d.%02d" % divmod(price['initial'], 100)
            out.append(u"\x02{}\x02 (was \x02{}\x02)".format(
                price_now, price_original))

    # release date
    if game['release_date']['coming_soon']:
        out.append(u"coming \x02{}\x02".format(game['release_date']['date']))
    else:
        out.append(u"released \x02{}\x02".format(game['release_date']['date']))

    # url
    if show_url:
        url = web.try_googl(STORE_URL.format(game['steam_appid']))
        out.insert(0, url)

    return u" - ".join(out)
def google(inp, api_key=None):
    """.g/.google <query> - Returns first Google search result."""
    parsed = api_get(inp, api_key)
    if 'items' not in parsed:
        return 'no results found'
    link = web.try_googl(parsed['items'][0]['link'])
    title = parsed['items'][0]['title']
    out = u'{} - \x02{}\x02'.format(link, title)
    out = ' '.join(out.split())
    if len(out) > 300:
        out = out[:out.rfind(' ')] + '...'
    return out
def sptfy(inp, sptfy=False):
    if sptfy:
        shortenurl = "http://sptfy.com/index.php"
        data = urlencode({'longUrl': inp, 'shortUrlDomain': 1, 'submitted': 1,
                          'shortUrlFolder': 6, 'customUrl': '',
                          'shortUrlPassword': '', 'shortUrlExpiryDate': '',
                          'shortUrlUses': 0, 'shortUrlType': 0})
        try:
            soup = http.get_soup(shortenurl, post_data=data, cookies=True)
        except:
            return inp
        try:
            link = soup.find('div', {'class': 'resultLink'}).text.strip()
            return link
        except:
            message = "Unable to shorten URL: %s" % \
                soup.find('div', {'class': 'messagebox_text'}).find(
                    'p').text.split("<br/>")[0]
            return message
    else:
        return web.try_googl(inp)
def seen(inp, chan='', nick='', bot=None, db=None, say=None, input=None):
    """seen [-G] <nick> - Tell when a nickname was last active in IRC.
    Flag -G to search all channels."""
    try:
        inp = inp.group(1)
    except:
        pass
    inp, _global = is_global(inp)

    if input.conn.nick.lower() == inp.lower():
        return "You need to get your eyes checked."
    if inp.lower() == nick.lower():
        return "Have you looked in a mirror lately?"

    if _global:
        row = db.execute(
            "select uts, time, chan, nick, action, msg from logfts "
            "where logfts match ? order by cast(uts as decimal) desc limit 1",
            ('nick:^"{}" OR (action:"kick" AND msg:^"{}") OR '
             '(chan:"nick" AND msg:^"{}")'.format(inp, inp, inp),)).fetchone()
    else:
        row = db.execute(
            "select uts, time, chan, nick, action, msg from logfts "
            "where logfts match ? order by cast(uts as decimal) desc limit 1",
            ('((chan:"{}" OR chan:"nick" OR chan:"quit") AND nick:^"{}") OR '
             '(chan:"{}" AND action:"kick" AND msg:^"{}") OR '
             '(chan:"nick" AND msg:^"{}")'.format(
                 chan.strip('#'), inp, chan.strip('#'), inp, inp),)).fetchone()

    if row:
        row = dict(zip(['uts', 'time', 'chan', 'nick', 'action', 'msg'], row))
        row['date'] = row['time'].split(' ')[0]
        row['timesince'] = timesince.timesince(float(row['uts']))
        if bot.config.get("logviewer_url"):
            row['log_url'] = web.try_googl(bot.config["logviewer_url"].format(
                row['chan'].strip('#'), *row['time'].split()))
        else:
            row['log_url'] = ''
        if row['action'] == 'KICK':
            row['who'], row['msg'] = row['msg'].split(' ', 1)
            if inp.lower() != row['nick'].lower():
                row['action'] = 'KICKEE'
        if row['action'] == 'NICK':
            if inp.lower() != row['nick'].lower():
                row['action'] = 'NICKEE'
        say(formats[row['action']].format(context='was last seen', **row).strip())
    else:
        return "I've never seen {}".format(inp)
def get_log_link(bot, db, q):
    if bot.config.get("logviewer_url"):
        try:
            id, quote, nick, uts = q
            match_clause = u'nick:"{}" AND msg:"quote {}"'.format(
                nick, quote.replace('"', '""'))
            row = db.execute(
                u"select chan, time from logfts where logfts match ? "
                u"and substr(msg, 2, 5) = 'quote'", (match_clause,)).fetchone()
            chan, _datetime = row
            _date, _time = _datetime.split()
        except Exception as e:
            print('Error fetching quote #{}'.format(id))
            return '???'
        return web.try_googl(bot.config["logviewer_url"].format(
            chan.strip('#'), _date, _time))
    else:
        return ""
def stock(inp, api_key=None):
    """stock <symbol> - Looks up stock information."""
    params = {'function': 'GLOBAL_QUOTE', 'apikey': api_key, 'symbol': inp}
    quote = http.get_json(url, query_params=params)
    if not quote.get("Global Quote"):
        return "Unknown ticker symbol '{}'".format(inp)

    quote = {k.split(' ')[-1]: tryParse(v)
             for k, v in quote['Global Quote'].items()}
    quote['url'] = web.try_googl('https://finance.yahoo.com/quote/' + inp)
    try:
        quote['color'] = "5" if float(quote['change']) < 0 else "3"
        return "{symbol} - ${price:.2f} " \
               "\x03{color}{change:+.2f} ({percent:.2f}%)\x0F " \
               "H:${high:.2f} L:${low:.2f} O:${open:.2f} " \
               "Volume:{volume:,.0f} - {url}".format(**quote)
    except:
        return "Error parsing return data, please try again later."
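# stock() relies on a tryParse() helper that is not shown here. A minimal
# sketch of what it presumably does, assuming numeric fields (including the
# trailing-% "change percent" field) should become floats and everything
# else stay a string (hypothetical implementation):
def tryParse(value):
    """Coerce an Alpha Vantage quote field to float; fall back to the raw value."""
    try:
        return float(value.rstrip('%'))
    except (TypeError, ValueError, AttributeError):
        return value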
def wiki(inp, say=None):
    """wiki <phrase> - Gets first sentence of Wikipedia article on <phrase>."""
    try:
        search_api = u'http://en.wikipedia.org/w/api.php'
        params = {
            'action': 'query',
            'list': 'search',
            'format': 'json',
            'srsearch': http.quote_plus(inp)
        }
        search = http.get_json(search_api, query_params=params)
    except:
        return 'Error accessing Wikipedia API, please try again in a few minutes.'

    if len(search['query']['search']) == 0:
        return 'Your query returned no results, please check your input and try again.'

    try:
        params = {
            'format': 'json',
            'action': 'query',
            'prop': 'info|extracts',
            'exintro': True,
            'explaintext': True,
            'exchars': 425,
            'pageids': search['query']['search'][0]['pageid'],
            'inprop': 'url',
            'redirects': 1
        }
        data = http.get_json(search_api, query_params=params)
    except:
        return 'Error accessing Wikipedia API, please try again in a few minutes.'

    data = data['query']['pages'][data['query']['pages'].keys()[0]]
    data['extract'] = data['extract'].strip('...').rsplit('.', 1)[0] + '.'
    say(u'{} - {}'.format(web.try_googl(data['fullurl']), data['extract']))
def shorten(inp):
    # ".shorten <url> - Shortens a URL with goo.gl"
    return web.try_googl(inp)
def map(inp, say=None):
    """map <place>|<origin to destination> - Gets a Map of place or route from Google Maps."""
    say(web.try_googl("https://www.google.com/maps/?q={}".format(
        http.quote_plus(inp))))
def get_sales(mask):
    # Fetch data
    data = get_featuredcategories()
    flash_data = get_featured()

    # Break if either returns empty - might be unnecessary
    if not data or not flash_data:
        return {}, False

    # Aggregate data
    fetchtime = int(time.time())
    data["flash"] = {"name": "Flash Sales", "items": []}
    data["featured"] = {"name": "Featured Sales", "items": []}
    for item in flash_data["large_capsules"]:
        if "discount_expiration" not in item.keys():
            item["discount_expiration"] = 9999999999
        if item["discount_expiration"] - fetchtime <= 43200:
            data["flash"]["items"].append(item)
        else:
            data["featured"]["items"].append(item)

    # Check for no data
    if sum([len(c_data.get('items', {}))
            for category, c_data in data.iteritems()
            if isinstance(c_data, dict)]) == 0:
        return {}, False

    # Mask data
    data = {k: v for k, v in data.items()
            if isinstance(v, dict) and k not in mask}
    if debug:
        log_sales_data(data, "data")

    # Format data
    sales = {}
    for category in data:
        if "items" not in data[category].keys():
            data[category]["items"] = []
        for item in data[category]["items"]:
            # Prepare item data
            try:
                # Bundles
                if set(["id", "url"]).issubset(set(item.keys())):
                    if not item["final_price"] and not item["discounted"]:
                        item["final_price"] = web.try_googl(item["url"])
                        item["discounted"] = True
                else:
                    # Midweek Madness, etc.
                    if "url" in item.keys() and "id" not in item.keys():
                        data[category]["name"] = item["name"] or data[category]["name"]
                        item["id"] = str(item["url"])[34:-1]
                        appdata = http.get_json(
                            "http://store.steampowered.com/api/"
                            "appdetails/?appids={}".format(item["id"]))[str(item["id"])]["data"]
                        item["name"] = appdata["name"]
                        if "Free to Play" in appdata["genres"]:
                            item["final_price"] = 'Free to Play'
                            item["discount_percent"] = '100'
                        else:
                            item["final_price"] = appdata["price_overview"]["final"]
                            item["discount_percent"] = appdata["price_overview"]["discount_percent"]
                    item["discounted"] = True if int(item["discount_percent"]) > 0 else False
            except:
                # Unusable category, e.g. banner announcements
                continue
            # Add appropriate item data to sales
            if item["discounted"]:
                item["name"] = item["name"].replace(" Advertising App", "")
                item = {k: u"{}".format(v) for k, v in item.items()
                        if k in ["name", "final_price", "discount_percent"]}
                if data[category]["name"] not in sales.keys():
                    sales[data[category]["name"]] = []
                sales[data[category]["name"]].append(item)

    # Filter and sort items
    sales = {category: sorted([item for item in items
                               if item["name"] != "Uninitialized"],
                              key=lambda x: x["name"])
             for category, items in sales.items()}
    if debug:
        log_sales_data(sales, "sales")

    # Return usable data
    return sales, True
def youtube_url(match, bot=None, say=None):
    # if "autoreply" in bot.config and not bot.config["autoreply"]:
    #     return
    url = web.try_googl(video_url % match.group(1))
    say(url + " - " + get_video_description(match.group(1)))
def get_sales(mask):
    # Fetch data
    data = get_featuredcategories()
    flash_data = get_featured()

    # Break if either returns empty - might be unnecessary
    if not data or not flash_data:
        return {}

    # Aggregate data
    fetchtime = int(time.time())
    data["flash"] = {"name": "Flash Sales", "items": []}
    data["featured"] = {"name": "Featured Sales", "items": []}
    for item in flash_data["large_capsules"]:
        if "discount_expiration" not in item.keys():
            item["discount_expiration"] = 9999999999
        if item["discount_expiration"] - fetchtime <= 28800:
            data["flash"]["items"].append(item)
        else:
            data["featured"]["items"].append(item)

    # Mask data
    data = {k: v for k, v in data.items()
            if isinstance(v, dict) and k not in mask}
    if debug:
        log_sales_data(data, "data")

    # Format data
    sales = {}
    for category in data:
        if "items" not in data[category].keys():
            data[category]["items"] = []
        for item in data[category]["items"]:
            # Prepare item data
            try:
                # Bundles
                if set(["id", "url"]).issubset(set(item.keys())):
                    if not item["final_price"] and not item["discounted"]:
                        item["name"] = item["name"].encode("ascii", "ignore")
                        item["final_price"] = web.try_googl(item["url"])
                        item["discounted"] = True
                else:
                    # Midweek Madness, etc.
                    if "url" in item.keys() and "id" not in item.keys():
                        data[category]["name"] = item["name"] or data[category]["name"]
                        item["id"] = str(item["url"])[34:-1]
                        appdata = http.get_json(
                            "http://store.steampowered.com/api/"
                            "appdetails/?appids={}".format(item["id"]))[item["id"]]["data"]
                        item["name"] = appdata["name"].encode("ascii", "ignore")
                        if "Free to Play" in appdata["genres"]:
                            item["final_price"] = 'Free to Play'
                            item["discount_percent"] = '100'
                        else:
                            item["final_price"] = appdata["price_overview"]["final"]
                            item["discount_percent"] = appdata["price_overview"]["discount_percent"]
                    item["discounted"] = True if int(item["discount_percent"]) > 0 else False
            except:
                # Unusable category, e.g. banner announcements
                continue
            # Add appropriate item data to sales
            if item["discounted"]:
                item = {k: str(v) for k, v in item.items()
                        if k in ["name", "final_price", "discount_percent"]}
                if data[category]["name"] not in sales.keys():
                    sales[data[category]["name"]] = []
                sales[data[category]["name"]].append(item)

    sales = {k: sorted(v, key=lambda v: v["name"]) for k, v in sales.items()}
    if debug:
        log_sales_data(sales, "sales")

    # Return usable data
    return sales
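# Both get_sales variants call log_sales_data() when `debug` is set; the
# helper is not shown in this section. A minimal sketch of what it presumably
# does, assuming it just dumps the structure to a timestamped JSON file for
# inspection (hypothetical implementation):
import json
import time

def log_sales_data(obj, label):
    """Write a sales data structure to disk for debugging."""
    filename = "sales_{}_{}.json".format(label, int(time.time()))
    with open(filename, "w") as f:
        json.dump(obj, f, indent=2, sort_keys=True)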