def keyword_k(self, message, params=None, **kwargs):
    """Retrieve kernel.org Bugzilla bug information (ex: K12345)"""
    bug_id = utils.ensure_int(params)
    if not bug_id:
        return

    url = "https://bugzilla.kernel.org/show_bug.cgi"
    response = utils.fetch_url(url, params={"id": bug_id})
    if response.status_code != 200:
        return

    soup = BeautifulSoup(response.content)
    # The page <title> doubles as the bug summary.
    desc = utils.decode_entities(soup.head.title.string)

    try:
        status = soup.find("span", {"id": "static_bug_status"}).string
        status = status.capitalize().split("\n")[0]
        vcards = soup.findAll("span", {"class": "vcard"})
        assignee = utils.decode_entities(vcards[0].contents[0].string)
        message.dispatch("%s [Status: %s, Assignee: %s] %s" % (
            desc, status, assignee, url))
    except TypeError:
        # Page layout did not match (e.g. bug not found); stay quiet.
        return
def _fetch_xsa_data(self):
    """Fetch and parse the Xen Security Advisory (XSA) index page.

    Returns a dict keyed by XSA id; each value holds "id", "link",
    "public_release" and "title" (missing cells fall back to None).
    Returns an empty dict when the page cannot be fetched.
    """
    resp = utils.fetch_url(self.XSA_URL)
    if resp.status_code != 200:
        # Paren form works on both Python 2 and 3.
        print("Error fetching XSAs: %s" % resp.status_code)
        return {}

    soup = BeautifulSoup(resp.text)
    data = {}
    for row in soup.findAll("tr"):
        children = row.findChildren("td")
        if not children:
            continue

        # The id cell is mandatory; skip rows that are not advisory
        # entries (headers, spacers). Narrowed from a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try:
            xsa_id = children[0].contents[0].string
        except (AttributeError, IndexError, KeyError, TypeError):
            continue

        # The remaining cells are best-effort: default to None rather
        # than dropping the whole advisory.
        try:
            xsa_link = "".join([self.XSA_URL,
                                children[0].contents[0]["href"]])
        except (AttributeError, IndexError, KeyError, TypeError):
            xsa_link = None
        try:
            public_release = children[1].contents[0]
        except (AttributeError, IndexError, KeyError, TypeError):
            public_release = None
        try:
            title = children[5].contents[0]
        except (AttributeError, IndexError, KeyError, TypeError):
            title = None

        data[xsa_id] = {"id": xsa_id,
                        "link": xsa_link,
                        "public_release": public_release,
                        "title": title}
    return data
def allergies(self, message, params=None, **kwargs):
    """Display current allergies in San Antonio, TX (ex: .allergies)."""
    d = datetime.datetime.now()
    # The data source does not publish on weekends.
    if d.isoweekday() in (6, 7):
        message.dispatch("Unable to fetch allergy data on weekends.")
        return

    today = d.strftime("%Y-%m-%d")
    url = "http://saallergy.info/day/%s" % today
    response = utils.fetch_url(url, headers={"accept": "application/json"})
    if response.status_code != 200:
        return

    data = response.json()
    # BUG FIX: the old append-then-rstrip approach stripped the "|" but
    # left a trailing space; joining with a separator avoids both.
    entries = ["%s - %s (%s)" % (a["allergen"], a["level"], a["count"])
               for a in data["results"]]
    message.dispatch("Allergies for %s: %s" % (today, " | ".join(entries)))
def _find_issues(self, user_id):
    """Find all issues for a Redmine user"""
    url = "%s/issues.json?assigned_to_id=%s" % (self.redmine_url, user_id)
    response = utils.fetch_url(url)
    # Implicitly return None on any non-OK response.
    if response.status_code == 200:
        return json.loads(response.content)["issues"]
def _find_issues(self, user_id):
    """Find all issues for a Redmine user"""
    response = utils.fetch_url(
        "%s/issues.json?assigned_to_id=%s" % (self.redmine_url, user_id))
    if response.status_code != 200:
        return
    payload = json.loads(response.content)
    return payload["issues"]
def distance(self, message, params=None, **kwargs):
    """Display distances (ex: .dist <nick|loc> [to <nick|loc>])"""
    maps_api = utils.get_config("GoogleMaps")
    try:
        key = maps_api.get("key")
    except Exception:
        message.dispatch("No Google Maps API key set")
        return

    # BUG FIX: str.split always returns at least one element, so the old
    # "if not parts" guard could never fire, and params=None crashed on
    # .split(). Validate params before splitting.
    if not params:
        message.dispatch(self.distance.__doc__)
        return

    parts = params.split(" to ")
    dest_nick = parts[0].strip()
    if len(parts) > 1:
        origin_nick = parts[1].strip()
    else:
        # Default the origin to the caller's own nick.
        origin_nick = message.source.split("!")[0]

    dest = None
    origin = None
    # A stored Wunderground location for a nick overrides a literal one.
    for filename in utils.list_files("Wunderground"):
        nick = filename.split("!")[0]
        if nick == dest_nick:
            dest = utils.read_file("Wunderground", filename)
        if nick == origin_nick:
            origin = utils.read_file("Wunderground", filename)

    if not dest:
        # They passed in a location
        dest = dest_nick
    if not origin:
        # They passed in a location
        origin = origin_nick

    origin = _resolve_pws(origin)
    dest = _resolve_pws(dest)

    resp = utils.fetch_url("https://maps.googleapis.com/maps/api"
                           "/directions/json?origin=%s&destination=%s"
                           "&key=%s" % (origin, dest, key))
    msg = None
    if resp.status_code == 200:
        try:
            msg = resp.json()["routes"][0]["legs"][0]["distance"]["text"]
        except IndexError:
            # No route found between the two points.
            pass
    if not msg:
        msg = "Unable to fetch data from Google Maps."
    message.dispatch(msg)
def distance(self, message, params=None, **kwargs):
    """Display distances (ex: .dist <nick|loc> [to <nick|loc>])."""
    maps_api = utils.get_config("GoogleMaps")
    try:
        key = maps_api.get("key")
    except Exception:
        message.dispatch("No Google Maps API key set.")
        return

    # BUG FIX: str.split always returns at least one element, so the old
    # "if not parts" guard could never fire, and params=None crashed on
    # .split(). Validate params before splitting.
    if not params:
        message.dispatch(self.distance.__doc__)
        return

    parts = params.split(" to ")
    dest_nick = parts[0].strip()
    if len(parts) > 1:
        origin_nick = parts[1].strip()
    else:
        # Default the origin to the caller's own nick.
        origin_nick = message.source.split("!")[0]

    dest = None
    origin = None
    # A stored Wunderground location for a nick overrides a literal one.
    for filename in utils.list_files("Wunderground"):
        nick = filename.split("!")[0]
        if nick == dest_nick:
            dest = utils.read_file("Wunderground", filename)
        if nick == origin_nick:
            origin = utils.read_file("Wunderground", filename)

    if not dest:
        # They passed in a location
        dest = dest_nick
    if not origin:
        # They passed in a location
        origin = origin_nick

    origin = _resolve_pws(origin)
    dest = _resolve_pws(dest)

    resp = utils.fetch_url("https://maps.googleapis.com/maps/api"
                           "/directions/json?origin=%s&destination=%s"
                           "&key=%s" % (origin, dest, key))
    msg = None
    if resp.status_code == 200:
        try:
            msg = resp.json()["routes"][0]["legs"][0]["distance"]["text"]
        except IndexError:
            # No route found between the two points.
            pass
    if not msg:
        msg = "Unable to fetch data from Google Maps."
    message.dispatch(msg)
def kernel(self, message, params=None, **kwargs):
    """Retrieve the current kernel version (ex: .kernel)"""
    url = "https://www.kernel.org/kdist/finger_banner"
    response = utils.fetch_url(url)
    if response.status_code != 200:
        return
    # BUG FIX: re.search returns None when the banner has no "mainline"
    # line, which previously crashed on m.group(1).
    m = re.search("(.* mainline .*)", response.content)
    if not m:
        return
    # Collapse the column-aligned banner line into a compact string.
    message.dispatch(m.group(1).replace(" ", ""))
def _find_users(self, offset=None):
    """Find all Redmine users"""
    url = "%s/users.json?limit=100" % self.redmine_url
    if offset:
        url += "&offset=%d" % offset
    response = utils.fetch_url(url)
    # Implicitly return None on any non-OK response.
    if response.status_code == 200:
        return json.loads(response.content)["users"]
def _find_users(self, offset=None):
    """Find all Redmine users"""
    if offset:
        query = "limit=100&offset=%d" % offset
    else:
        query = "limit=100"
    response = utils.fetch_url("%s/users.json?%s" % (self.redmine_url,
                                                     query))
    if response.status_code != 200:
        return
    payload = json.loads(response.content)
    return payload["users"]
def google(self, message, params=None, **kwargs):
    """Search Google (ex: .g <query>)"""
    url = "http://ajax.googleapis.com/ajax/services/search/web?v=1.0"
    response = utils.fetch_url(url, params={"q": params})
    if response.status_code != 200:
        return
    results = json.loads(response.content)["responseData"]["results"]
    if not results:
        message.dispatch("No results found: '%s'" % params)
        return
    for result in results:
        # Titles may contain non-ASCII characters; drop them for IRC.
        title = result["titleNoFormatting"].encode("ascii", "ignore")
        message.dispatch("%s: %s" % (title, result["unescapedUrl"]))
def google(self, message, params=None, **kwargs):
    """Search Google (ex: .g <query>)"""
    url = "http://ajax.googleapis.com/ajax/services/search/web?v=1.0"
    response = utils.fetch_url(url, params={"q": params})
    if response.status_code != 200:
        return
    payload = json.loads(response.content)
    hits = payload["responseData"]["results"]
    if hits:
        for hit in hits:
            # Titles may contain non-ASCII characters; drop them for IRC.
            clean_title = hit["titleNoFormatting"].encode("ascii", "ignore")
            message.dispatch("%s: %s" % (clean_title, hit["unescapedUrl"]))
    else:
        message.dispatch("No results found: '%s'" % params)
def _find_title(self, message, url):
    """Find the title of a given URL."""
    # NOTE(jk0): Slack does some weird things with URLs.
    url = url.replace("<", "").replace(">", "").split("|")[0]
    if not url.startswith(("http://", "https://")):
        url = "http://" + url
    try:
        response = utils.fetch_url(url)
    except Exception:
        # Best-effort: unreachable hosts are silently ignored.
        return
    # FIX: consistent with the other _find_title variant — don't announce
    # the title of an error page.
    if response.status_code != 200:
        return
    soup = BeautifulSoup(response.content)
    # FIX: a <head> without a <title> previously raised AttributeError.
    if soup.head and soup.head.title:
        title = utils.decode_entities(soup.head.title.string)
        content_type = response.headers.get("Content-Type")
        message.dispatch("%s (%s)" % (title, content_type))
    else:
        message.dispatch("No title found: %s" % url)
def urban(self, message, params=None, **kwargs):
    """Search Urban Dictionary (ex: .urban <query>)"""
    url = "http://www.urbandictionary.com/define.php"
    response = utils.fetch_url(url, params={"term": params})
    if response.status_code != 200:
        return
    soup = BeautifulSoup(response.content)
    try:
        meaning = soup.find("div", {"class": "meaning"}).text
        example = soup.find("div", {"class": "example"}).text
    except AttributeError:
        message.dispatch("No results found: '%s'" % params)
        # BUG FIX: without this return the code fell through and raised
        # UnboundLocalError on `meaning` below.
        return
    meaning = utils.decode_entities(meaning)
    example = utils.decode_entities(example)
    message.dispatch("%s (ex: %s)" % (meaning, example))
def wikipedia(self, message, params=None, **kwargs):
    """Search Wikipedia (ex: .wikipedia <query>)"""
    response = utils.fetch_url("https://en.wikipedia.org/w/api.php",
                               params={
                                   "action": "query",
                                   "generator": "allpages",
                                   "gaplimit": 4,
                                   "gapfrom": params,
                                   "format": "json"
                               })
    if response.status_code != 200:
        return
    pages = json.loads(response.content)["query"]["pages"]
    for page in pages.values():
        # Wikipedia URLs use underscores in place of spaces.
        slug = page["title"].replace(" ", "_")
        message.dispatch("http://en.wikipedia.org/wiki/%s" % slug)
def _find_issue(self, message, issue_id):
    """Find and display a Redmine issue"""
    response = utils.fetch_url(
        "%s/issues/%s.json" % (self.redmine_url, issue_id))
    if response.status_code != 200:
        return
    try:
        issue = json.loads(response.content)["issue"]
    except Exception:
        # Unparseable or unexpected payload; fail quietly.
        return
    assignee = issue.get("assigned_to", {}).get("name", "N/A")
    summary = "RM %s #%s: %s [Status: %s, Priority: %s, Assignee: %s]" % (
        issue["tracker"]["name"], issue["id"], issue["subject"],
        issue["status"]["name"], issue["priority"]["name"], assignee)
    link = "https://%s/issues/%s" % (self.redmine_domain, issue["id"])
    message.dispatch("%s %s" % (summary, link))
def oncall(self, message, params=None, **kwargs):
    """Show who is on call (ex: .oncall [<group>])."""
    url = "%s/api/v1/escalation_policies/on_call" % self.subdomain
    headers = {
        "Authorization": "Token token=%s" % self.key,
        "Content-Type": "application/json"
    }
    response = utils.fetch_url(url, headers=headers,
                               params={"query": params})
    # BUG FIX: the response body was decoded unconditionally; a failed
    # API call raised instead of failing quietly like other commands.
    if response.status_code != 200:
        return
    response_json = response.json()
    for policy in response_json["escalation_policies"]:
        message.dispatch(policy["name"])
        for level in policy["on_call"]:
            message.dispatch("- Level %s: %s <%s>" % (
                level["level"], level["user"]["name"],
                level["user"]["email"]))
        # Pause between policies so the bot doesn't flood the channel.
        time.sleep(2)
def boxoffice(self, message, params=None, **kwargs):
    """Display top box office movies"""
    resp = utils.fetch_url("http://www.rottentomatoes.com")
    if resp.status_code != 200:
        message.dispatch("Could not fetch box office results.")
        return
    message.dispatch("Top 10 at the Box Office")
    message.dispatch("=" * 64)
    soup = BeautifulSoup(resp.text)
    rows = soup.findAll("table", id="Top-Box-Office")[0].findAll("tr")
    for row in rows:
        children = row.findAll("td")
        try:
            score = row("span", **{"class": "tMeterScore"})[0].contents[0]
            title = children[1].find("a").string
            take = children[2].string.strip()
        except Exception:
            # BUG FIX: `pass` fell through and dispatched stale values
            # from the previous row (or raised NameError on the first
            # non-matching row); skip rows that don't parse.
            continue
        msg = "%s %s%s" % (score.rjust(3), title.ljust(40), take.rjust(10))
        message.dispatch(msg)
def _find_title(self, message, url):
    """Find the title of a given URL"""
    # NOTE(jk0): Slack does some weird things with URLs.
    url = url.replace("<", "").replace(">", "").split("|")[0]
    if not url.startswith(("http://", "https://")):
        url = "http://" + url
    response = utils.fetch_url(url)
    if response.status_code != 200:
        return
    soup = BeautifulSoup(response.content)
    # FIX: a <head> without a <title> previously raised AttributeError.
    if soup.head and soup.head.title:
        title = utils.decode_entities(soup.head.title.string)
        # BUG FIX: a missing Content-Type header returned None, which
        # crashed on .split(); default to "N/A" instead.
        content_type = response.headers.get("Content-Type", "N/A")
        content_type = content_type.split(";", 1)[0]
        content_size = response.headers.get("Content-Length")
        content_size = content_size + " bytes" if content_size else "N/A"
        message.dispatch("%s (%s, %s)" % (title, content_type,
                                          content_size))
    else:
        message.dispatch("No title found for %s" % url)
def get(self, issue_id):
    """Return the raw HTTP response for *issue_id* from the REST API.

    NOTE(review): verify=False disables TLS certificate verification.
    """
    url = "%s/rest/api/latest/issue/%s" % (self.auth_server, issue_id)
    credentials = (self.username, self.password)
    return utils.fetch_url(url, verify=False, auth=credentials)