# Shared imports, reconstructed from usage across the handlers in this
# section (Python 2 on Google App Engine). `base` and `unescape` are
# project-local modules; each handler's module defines its own API_URI,
# API_KEY and similar constants, which are left as-is below.
import csv
import re
import StringIO
import urllib
from xml.dom.minidom import parseString
from xml.etree import ElementTree

import BeautifulSoup
import html2text
from google.appengine import api  # api.urlfetch is google.appengine.api.urlfetch

import base      # project-local: RequestHandler, collapse()
import unescape  # project-local: HTML-entity unescaper


def get(self, *args):
    uri = args[1]
    if not uri:
        return self.ok("Please provide a URI.")
    uri = urllib.unquote(uri)
    try:
        # Fetch the page, pretty-print it, then reduce it to plain text.
        html = api.urlfetch.fetch(uri).content
        tree = BeautifulSoup.BeautifulSoup(html)
        html = tree.prettify().decode("utf8")
        text = html2text.html2text(html).encode("utf8")
    except Exception:
        return self.ok("Error fetching URI.")
    # Strip non-word characters and digits before posting the text on.
    text = base.collapse(re.sub(r"\W|\d", " ", text))
    payload = urllib.urlencode({"data": text})
    try:
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        html = api.urlfetch.fetch(
            API_URI, method=api.urlfetch.POST, payload=payload,
            headers=headers).content
    except Exception:
        return self.ok("Error fetching URI results.")
    tree = BeautifulSoup.BeautifulSoup(html)
    try:
        # The interesting part of the response sits in the first <b> tag.
        message = base.collapse(tree.find("b").string)
    except Exception:
        return self.ok("Error parsing results.")
    return self.ok(message)
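# Nearly every handler in this section runs strings through base.collapse(),
# which is not shown here. A minimal sketch of what the call sites suggest
# it does (an assumption, not the project's actual code): flatten runs of
# whitespace to single spaces and trim the ends.
def collapse(text):
    # Collapse internal whitespace and strip leading/trailing space.
    return re.sub(r"\s+", " ", text).strip()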
class Main(base.RequestHandler):

    def get(self, *args):
        domain = args[1] or ""
        if not domain:
            return self.ok("Please provide a domain name.")
        domain = urllib.unquote(domain).split()
        if len(domain) > 1:
            return self.ok("Please provide a single domain name.")
        domain = domain[0]
        query = urllib.urlencode({"h": domain, "f": 0})
        uri = API_URI + "?" + query
        try:
            tree = ElementTree.XML(api.urlfetch.fetch(uri).content)
        except Exception, error:
            return self.ok("Invalid API response.")
        registered = tree.find(".//regrinfo/registered")
        if registered.text.strip() == "no":
            return self.ok("The %s domain is not registered." % domain)
        message = "The %s domain is registered" % domain
        try:
            # The owner name is optional in the response.
            message += " to %s" % base.collapse(
                tree.find(".//owner/name").text)
        except Exception:
            pass
        if message[-1] != ".":
            message += "."
        return self.ok(message.encode("utf8"))
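# For reference, the response shape the whois handler above assumes, as
# reconstructed from its .//regrinfo/registered and .//owner/name lookups
# (illustrative only; the real API response will carry more fields):
#
#   <whois>
#     <regrinfo>
#       <registered>yes</registered>
#       <owner>
#         <name>Example Owner</name>
#       </owner>
#     </regrinfo>
#   </whois>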
def get(self, *args):
    try:
        query = base.collapse(urllib.unquote(args[1]))
    except Exception:
        return self.ok("Please provide some sample elements.")
    query = StringIO.StringIO(query)
    # Shell-style splitting, so quoted multi-word elements stay whole.
    words = csv.reader(query, delimiter=" ").next()
    # Google Sets takes up to five sample items, in fields q1..q5.
    keys = ["q%d" % i for i in range(1, 6)]
    dct = {}
    for i in range(5):
        try:
            dct[keys[i]] = words[i]
        except IndexError:
            dct[keys[i]] = ""
    dct['btn'] = 'Small Set (15 items or fewer)'
    dct['hl'] = 'en'
    query = urllib.urlencode(dct)
    try:
        html = api.urlfetch.fetch(baseuri + '?' + query).content
    except Exception:
        return self.ok("Error fetching results.")
    try:
        tree = BeautifulSoup.BeautifulSoup(html)
        # convertEntities=BeautifulSoup.BeautifulStoneSoup.HTML_ENTITIES
        # Result items are the links whose href points back at Google.
        links = tree.findAll("a", {"href": startswithgoogle})
        message = ", ".join(link.string for link in links)
        if not message:
            message = "Nothing found. All lonely."
    except Exception:
        return self.ok("Error parsing results.")
    return self.ok(message)
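# startswithgoogle is used above as an href matcher but is not defined in
# this section. BeautifulSoup accepts a compiled regular expression as an
# attribute filter, so it is plausibly something along these lines (the
# exact URL prefix is an assumption):
startswithgoogle = re.compile(r"^http://www\.google\.com/search")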
def get(self, *args):
    try:
        query = base.collapse(urllib.unquote(args[1]))
        query = StringIO.StringIO(query)
        words = csv.reader(query, delimiter=" ").next()
        actor1 = words[1]
        actor2 = words[0]
    except Exception:
        return self.ok("Please provide exactly two actor names.")
    message = self.getit(actor1, actor2)
    if not message:
        # No direct hit; see whether the page suggested a canonical name.
        u = self.tree.find('i')
        if not u:
            # No suggestion either: give up.
            message = "Fail. No connection found."
            return self.ok(message)
        name = str(u.string)
        # Retry with the suggestion in IMDb's "Last, First (I)" form,
        # e.g. "Kevin Bacon" becomes "Bacon, Kevin (I)".
        [f, s] = name.split(' ', 1)
        if actor1 == name:
            actor1 = "%s, %s (I)" % (s, f)
        else:
            actor2 = "%s, %s (I)" % (s, f)
        message = self.getit(actor1, actor2)
        if not message:
            message = ("One or both of those actors was not unique in the "
                       "database. We even tried (%s)" % str((actor1, actor2)))
        else:
            message = ("((Tried (%s))) " % str((actor1, actor2))) + message
    return self.ok(message)
class Main(base.RequestHandler):

    def get(self, *args):
        user = args[1] or ""
        if args[3]:
            user = args[3]
        if not user:
            return self.ok("Please provide a username.")
        # Allow known nicknames to map onto different Last.fm usernames.
        user = override.get(user.lower(), user)
        query = urllib.urlencode({
            "method": "user.getrecenttracks",
            "api_key": API_KEY,
            "user": user,
        })
        uri = API_URI + "?" + query
        try:
            tree = ElementTree.XML(api.urlfetch.fetch(uri).content)
        except Exception, error:
            return self.ok("No user details found.")
        track = tree.find(".//track")
        if not track:
            return self.ok("No tracks found.")
        if track.get("nowplaying"):
            status = "Now playing "
        else:
            status = "Last played "
        message = status
        # Each field is optional; build the message from whatever exists.
        try:
            message += base.collapse(track.find(".//name").text)
        except Exception:
            pass
        try:
            message += " by %s" % base.collapse(track.find(".//artist").text)
        except Exception:
            pass
        try:
            message += " from the album %s" % base.collapse(
                track.find(".//album").text)
        except Exception:
            pass
        try:
            message += " - %s" % base.collapse(track.find(".//url").text)
        except Exception:
            pass
        if not message:
            return self.ok("No track information found.")
        return self.ok(message.encode("utf8"))
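# override, consulted above via override.get(user.lower(), user), maps
# known chat nicknames onto Last.fm usernames. A hypothetical example of
# its shape:
override = {
    "somenick": "some_lastfm_user",  # placeholder entry, not real data
}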
def get(self, *args):
    query = base.collapse(urllib.unquote(args[1]))
    query = urllib.urlencode({"key": query, "type": "Books", "page": "1"})
    uri = API_URI + "?" + query
    try:
        html = api.urlfetch.fetch(uri).content
        html = unescape.unescape(html.decode("latin1"))
        tree = BeautifulSoup.BeautifulSoup(html)
    except Exception, error:
        return self.ok("Timeout fetching ISBN information.")
def get(self, *args):
    source = "http://www.scorespro.com/rss/live-soccer.xml"
    text = args[1] or ""
    if not text:
        return self.ok("Please specify a team.")
    text = urllib.unquote(text)
    regexp = re.compile("^.*%s.*$" % text, re.IGNORECASE)
    doc = parseString(api.urlfetch.fetch(source).content)
    # The slice drops the feed's own <title> elements at either end;
    # everything in between is a scoreline.
    for entry in doc.getElementsByTagName("title")[1:-1]:
        scoreline = entry.childNodes[0].toxml()
        if regexp.match(scoreline):
            return self.ok(base.collapse(scoreline))
    self.ok("No score was found for your query '%s'!" % text)
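# A quick illustration of the matching above, with a made-up feed title:
regexp = re.compile("^.*%s.*$" % "arsenal", re.IGNORECASE)
assert regexp.match("England: Arsenal 2-1 Chelsea")  # hypothetical scoreline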
class Main(base.RequestHandler):

    def get(self, *args):
        query = base.collapse(urllib.unquote(args[1]))
        if not query:
            return self.ok("Please provide two Wikipedia article titles.")
        query = StringIO.StringIO(query)
        try:
            # Shell-style splitting, so multi-word titles can be quoted.
            query_tokens = csv.reader(query, delimiter=" ").next()
        except Exception:
            return self.ok("Please use proper quoting for arguments.")
        try:
            from_name = query_tokens[0] or ""
        except IndexError:
            from_name = ""
        if not from_name:
            return self.ok("Please name a starting Wikipedia article title.")
        try:
            to_name = query_tokens[1] or ""
        except IndexError:
            to_name = ""
        if not to_name:
            return self.ok("Please name an ending Wikipedia article title.")
        query = urllib.urlencode({"from": from_name, "to": to_name})
        uri = API_URI + "?" + query
        try:
            html = api.urlfetch.fetch(uri).content
            html = unescape.unescape(html.decode("latin1"))
            tree = BeautifulSoup.BeautifulSoup(html)
        except Exception, error:
            return self.ok("Timeout fetching Wikipedia distance.")
        try:
            # The links on the result page, minus the first, form the path.
            messages = []
            for a in tree.findAll("a"):
                messages.append(base.collapse(a.string))
            messages = messages[1:]
        except Exception:
            return self.ok("Could not find Wikipedia distance.")
        if not messages:
            # No path; fall back to whatever notice the page displays.
            try:
                return self.ok(tree.find("b").string + ".")
            except Exception:
                try:
                    return self.ok(tree.find("h2").string + ".")
                except Exception:
                    return self.ok("Error parsing Wikipedia distance.")
        message = " > ".join(messages)
        return self.ok(message)
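# unescape.unescape() is a project-local helper (also used by the ISBN
# handler above) that turns HTML entities back into unicode characters.
# A minimal sketch in the style of the widely-circulated effbot recipe --
# an assumption, since the project's own version is not shown:
import htmlentitydefs

def unescape(text):
    def fixup(match):
        ref = match.group(0)
        if ref.startswith("&#"):
            # Numeric character reference: &#nnn; or &#xhh;
            try:
                if ref[2] in ("x", "X"):
                    return unichr(int(ref[3:-1], 16))
                return unichr(int(ref[2:-1]))
            except ValueError:
                return ref
        # Named entity: &amp;, &eacute;, ...
        try:
            return unichr(htmlentitydefs.name2codepoint[ref[1:-1]])
        except KeyError:
            return ref
    return re.sub(r"&#?\w+;", fixup, text)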
def get(self, *args): uri = args[1] or "" if not uri: return self.ok("Please specify a URI.") uri = urllib.unquote(uri) if not uri.startswith("http://"): uri = "http://" + uri try: tree = BeautifulSoup.BeautifulSoup( api.urlfetch.fetch(uri).content, convertEntities=BeautifulSoup.BeautifulStoneSoup.HTML_ENTITIES) except: return self.ok("Error fetching fact.") try: message = base.collapse(tree.find("p", {"class" : "fact"}).string) except: return self.ok("Could not find fact.") return self.ok(message)
def get(self, *args):
    # args[0] starts with a slash; drop it to get the search term.
    arg = args[0].split('/')[1]
    # Jargon File pages are filed under the term's first letter, with
    # spaces in the term turned into dashes.
    ch = arg.strip()[0].upper()
    pg = urllib.quote(urllib.unquote(arg.strip()).replace(' ', '-'))
    url = "http://www.catb.org/jargon/html/" + ch + "/" + pg + ".html"
    try:
        data = api.urlfetch.fetch(url)
        tree = BeautifulSoup.BeautifulSoup(
            data.content,
            convertEntities=BeautifulSoup.BeautifulStoneSoup.HTML_ENTITIES)
    except Exception:
        return self.ok("Error fetching " + url)
    try:
        # The first paragraph of the entry is the definition.
        p = tree.find("p")
        message = base.collapse("".join(p.findAll(text=True)))
    except Exception:
        return self.ok("Could not find definition.")
    return self.ok(message)
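# Worked example of the URL built above (hypothetical input):
#   arg = "foo bar"
#   ->  http://www.catb.org/jargon/html/F/foo-bar.html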
def get(self, *args):
    word = args[1]
    if not word:
        return self.ok("Please provide a word.")
    word = urllib.unquote(word)
    payload = urllib.urlencode({"q": word})
    try:
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        html = api.urlfetch.fetch(
            API_URI, method=api.urlfetch.POST, payload=payload,
            headers=headers).content
    except Exception:
        return self.ok("Error fetching results.")
    tree = BeautifulSoup.BeautifulSoup(html)
    try:
        # The definition comes back inside a <blockquote>.
        message = base.collapse(tree.find("blockquote").string)
        message = unescape.unescape(message)
    except Exception:
        return self.ok("Error parsing results.")
    return self.ok(message)
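# All of these handlers reply through self.ok(), inherited from the
# project's base.RequestHandler. A minimal sketch of what it plausibly
# looks like on App Engine's webapp framework (an assumption; the real
# class may do more, e.g. logging or formatting):
from google.appengine.ext import webapp

class RequestHandler(webapp.RequestHandler):

    def ok(self, message=""):
        # Write a plain-text reply; handlers use `return self.ok(...)`
        # as shorthand for "respond and stop".
        self.response.headers["Content-Type"] = "text/plain"
        self.response.out.write(message)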