def trig_mc(self, bot, source, target, trigger, argument): term = argument.strip() if not term: return "usage: .metacritic <game title> or <game> <platform> (slower)" url = "http://apps.metacritic.com/search/process?ty=3&tfs=game_title&ts=" + utility.escape(term) data = utility.read_url(url)["data"] result = self.parse_result(data, term, url) if result: return result error_handler.output_message("<metacritic> title search failed.") url = "http://apps.metacritic.com/search/process?ty=3&ts=" + utility.escape(term) data = utility.read_url(url)["data"] result = self.parse_result(data, term, url) if result: return result return ( "Found nothing. Try it yourself: " + "http://apps.metacritic.com/search/process?ty=3&ts=" + utility.escape(term) )
def trig_mc(self, bot, source, target, trigger, argument):
    """Search Metacritic for a game; title-restricted search first, then a broad fallback."""
    term = argument.strip()
    if not term:
        return "usage: .metacritic <game title> or <game> <platform> (slower)"
    # Fast path: search only among game titles.
    url = 'http://apps.metacritic.com/search/process?ty=3&tfs=game_title&ts=' + utility.escape(term)
    data = utility.read_url(url)["data"]
    result = self.parse_result(data, term, url)
    if result:
        return result
    error_handler.output_message("<metacritic> title search failed.")
    # Slow path: unrestricted search over all fields.
    url = 'http://apps.metacritic.com/search/process?ty=3&ts=' + utility.escape(term)
    data = utility.read_url(url)["data"]
    result = self.parse_result(data, term, url)
    if result:
        return result
    # Both searches came up empty; hand the user the manual search URL.
    return "Found nothing. Try it yourself: " + 'http://apps.metacritic.com/search/process?ty=3&ts=' + utility.escape(term)
def posten_postnr_query(self, Address, Postort):
    """Look up Swedish postal codes for a street address on posten.se.

    Scrapes the result table and groups street entries by postal code.
    Returns "code: street …, code: street …" or "no result :<".

    Fix: ``dict.has_key``/``iterkeys`` were removed in Python 3 — replaced
    with the ``in`` operator and ``items()``, which behave identically on
    Python 2.7 as well. Result assembly now uses ``join`` instead of
    repeated string concatenation.
    """
    url = ('http://www.posten.se/soktjanst/postnummersok/resultat.jspv?gatunamn='
           + utility.escape(Address) + '&po=' + utility.escape(Postort))
    response = utility.read_url(url)
    data = response["data"]
    postnrs = {}
    for line in data.split("\n"):
        search = re.search('<TD class="firstcol">([^<]*)</TD><TD>([^<]*)</TD><TD>([^<]*)', line)
        if search:
            street = search.group(1) + " " + search.group(2)
            code = search.group(3)
            if code in postnrs:
                postnrs[code] += " & " + street
            else:
                postnrs[code] = street
    result = ", ".join("%s: %s" % (code, streets) for code, streets in postnrs.items())
    if len(result) == 0:
        return "no result :<"
    return result
def posten_postnr_query(self, Address, Postort):
    """Look up Swedish postal codes for Address/Postort on posten.se; group streets per code.

    NOTE(review): uses Python-2-only ``has_key``/``iterkeys``.
    """
    url = 'http://www.posten.se/soktjanst/postnummersok/resultat.jspv?gatunamn=' + utility.escape(Address) + '&po=' + utility.escape(Postort)
    response = utility.read_url(url)
    data = response["data"]
    postnrs = {}
    # Each result row: street | locality | postal code.
    for line in data.split("\n"):
        search = re.search('<TD class="firstcol">([^<]*)</TD><TD>([^<]*)</TD><TD>([^<]*)', line)
        if search:
            # Group multiple streets under the same postal code.
            if postnrs.has_key(search.group(3)):
                postnrs[search.group(3)] += " & " + search.group(1) + " " + search.group(2)
            else:
                postnrs[search.group(3)] = search.group(1) + " " + search.group(2)
    result = ""
    for postnr in postnrs.iterkeys():
        if len(result) != 0:
            result += ", "
        result += "%s: %s" % (postnr, postnrs[postnr])
        # print postnrs[postnr]
    if len(result) == 0:
        return "no result :<"
    else:
        # print result
        return result
def icq_lookup(icqid):
    """Scrape the public "about me" page for an ICQ UIN.

    Returns "nick: info, city, country" (parts omitted when absent),
    or None when the profile cannot be parsed or has no nickname.
    """
    url = 'http://www.icq.com/people/about_me.php?uin=' + utility.escape(icqid)
    response = utility.read_url(url)
    data = response["data"].replace("\n", "")
    m = re.search('<div class="uinf-2-2-2-1">(.*?)<\/div>.*?<div class="uinf-2-2-2-2">(.*?)<\/div>.*?<div class="uinf-2-2-2-4">(.*?)<\/div>.*?<div class="uinf-2-2-2-4">(.*?)<\/div>', data)
    if not m:
        return None
    nick, info, city, country = m.groups()
    if info:
        # Turn any remaining line breaks in the blurb into comma separators.
        info = re.sub("\n|\r\n|\n\n", ", ", info)
    if not nick:
        return None  # matched markup but no nickname — same outcome as the original
    summary = nick
    if info or city or country:
        summary += ": "
    if info:
        summary += info
    if city:
        summary += ", " + city
    if country:
        summary += ", " + country
    return summary
def get_fav(self, trig, args):
    """Return the favourite URL registered for *trig* with *args* substituted.

    The stored URL template may contain a ``%s`` placeholder; *args* is
    URL-escaped before substitution, except that ``/`` survives literally.
    Returns None when *trig* has no registered favourite.
    """
    if trig not in self.favorites:
        return None
    template = self.favorites[trig]
    substituted = utility.escape(args).replace('%2F', '/')
    return template.replace('%s', substituted)
def posten_kolli_query(kolli_id):
    """Track a parcel on posten.se by consignment id.

    Returns a "<weight> från <sender> | <date>: <status> | <url>" summary,
    or None when the tracking page cannot be parsed.
    """
    url = ("http://posten.se/tracktrace/TrackConsignments_do.jsp?trackntraceAction=saveSearch&consignmentId="
           + utility.escape(kolli_id))
    data = utility.read_url(url)["data"]
    match = re.search(
        "(?ims)<dt>Från:</dt><dd>(.*?)</dd>.*?rightcol.*h2>.*<h3>(.*?)</h3>\s*?(.*?)(<br/>|<div).*?<dt>Vikt:</dt><dd>(.*?)</dd>",
        data,
    )
    if match is None:
        return None
    sender = match.group(1)
    date = match.group(2)
    status = match.group(3)
    weight = match.group(5)
    if not (date and status):
        return None
    # Strip any leftover markup from the status text before formatting.
    return "%s fr\xe5n %s | %s: %s | %s" % (weight, sender, date, re.sub("<.+?>", "", status), url)
def query(self, argument):
    """Look up a movie via imdbapi.com and format a one-line summary.

    Falls back to a manual IMDb search URL on connection, parse, or
    no-result failures.
    """
    argument = utility.escape(argument)
    site_search_url = u"http://akas.imdb.com/find?s=all&q=" + argument
    api_url = u"http://www.imdbapi.com/?t=%(search_term)s&r=json&plot=short" % {"search_term": argument}
    response = utility.read_url(api_url)
    if not response:
        return u"Couldn't connect to the API :( | Manual search: " + site_search_url
    try:
        data = JSONDecoder().decode(response['data'])
    except Exception:
        return u"Couldn't parse the API output :( | Manual search: " + site_search_url
    # The API signals hits with a literal "True" string.
    if data.get(u"Response") != u"True":
        return u"No results found! Maybe you should try searching manually: " + site_search_url
    template = (u"%(title)s (%(year)s) - Rating: %(rating)s out of 10 - Genre: %(genre)s - "
                u"http://akas.imdb.com/title/%(id)s/ | More results: %(site_search_url)s")
    return template % {
        u"title": data.get(u"Title", u"Missing title :S"),
        u"year": data.get(u"Year", u"Unknown year"),
        u"rating": data.get(u"Rating", u"N/A"),
        u"genre": data.get(u"Genre", u"Unknown"),
        u"id": data.get(u"ID", u"tt0107838"),
        u"site_search_url": site_search_url,
    }
def trig_temp(self, bot, source, target, trigger, argument):
    """
    Usage: .temp [City]

    Uses data from temperature.nu, please direct all complaints to
    www.temperatur.nu. A bare ".temp" reuses the caller's saved place
    (default "ryd"); naming a place also saves it for next time.
    """
    place = argument.strip()
    if place:
        # Remember this user's preferred place for future bare ".temp" calls.
        self.places[source] = place
        self.save()
    else:
        place = self.places.get(source, "ryd")
    display_name = place
    lookup = utility.escape(utility.asciilize(place))
    # awesome hack to include avesta! (served from the nearby "fors" probe)
    actual_lookup = "fors" if lookup.lower() == "avesta" else lookup
    response = utility.read_url("http://www.temperatur.nu/termo/%s/temp.txt" % actual_lookup.lower())
    m = None
    if response:
        m = _get_temp_re.match(response["data"])
    if m and m.group(1) != "not found":
        return "Temperature in %s: %s." % (display_name, m.group(1))
    return "Temperature in %s: invalid place, try using .yr instead." % (display_name)
def icq_lookup(icqid):
    """Scrape icq.com for a UIN's public profile; return "nick: info, city, country" or None."""
    url = 'http://www.icq.com/people/about_me.php?uin=' + utility.escape(icqid)
    response = utility.read_url(url)
    # Newlines are stripped so the multi-div regex below can match across lines.
    data = response["data"].replace("\n", "")
    m = re.search('<div class="uinf-2-2-2-1">(.*?)<\/div>.*?<div class="uinf-2-2-2-2">(.*?)<\/div>.*?<div class="uinf-2-2-2-4">(.*?)<\/div>.*?<div class="uinf-2-2-2-4">(.*?)<\/div>', data)
    if m:
        nick = m.group(1)
        info = m.group(2)
        if info:
            # Collapse line breaks in the free-text blurb into comma separators.
            info = re.sub("\n|\r\n|\n\n",", ",info)
        city = m.group(3)
        country = m.group(4)
        if nick:
            result = nick
            if info or city or country:
                result = result + ": "
            if info:
                result = result + info
            if city:
                result = result + ", " + city
            if country:
                result = result + ", " + country
            return result
        # NOTE(review): when the markup matches but nick is empty, control
        # falls through and the function implicitly returns None.
    else:
        return None
def tyda_lookup(word, lang):
    """Translate *word* via tyda.se.

    Returns "base (inflections): translation, … | url", a not-found
    message pointing at the manual search, or an error message when the
    site is unreachable.
    """
    url = ("http://tyda.se/search?w=" + utility.escape(word) +
           "&source_lang=" + utility.escape(lang))
    response = utility.read_url(url)
    if not response:
        return "Ohnoes, nothing found."
    data = response["data"].replace("\n", "")
    # The entry block: base word, inflection span, then the translation table.
    entry = re.search("\<span class=\"tyda_entry_base\"( title=\"[^\"]+\")?\>([^\<]+)\<\/span\>(.*?)\<\/td\>(.+?)\<\/table\>(\<table cellpadding=\"0\" cellspacing=\"0\" class=\"tyda_entry\"\>|\<script type=\"text\/javascript\"\>)", data)
    if not entry:
        return "No result found, maybe you should try searching manually: " + url
    base_word = entry.group(2).replace(" (", ", ").replace(")", "")
    inflections = [
        m.group(1).replace(" (", ", ").replace(")", "")
        for m in re.finditer("\<span class=\"tyda_entry_inflected\" title=\"[^\"]+\"\>([^\<]+)\<\/span\>", entry.group(3))
    ]
    translations = [
        m.group(1)
        for m in re.finditer("\<a id=\"tyda_transR\d+\" href=\"\/search\/[^\"]+\"\>([^\<]+)\<\/a\>", entry.group(4))
    ]
    suffix = " (" + ", ".join(inflections) + ")" if inflections else ""
    return base_word + suffix + ": " + ", ".join(translations) + " | " + url
def tyda_lookup(word, lang):
    """Translate *word* via tyda.se; returns "base (inflections): translations | url"."""
    # Assemble URL
    url = "http://tyda.se/search?w=" + utility.escape(word) + "&source_lang=" + \
        utility.escape(lang)
    # Fetch result
    response = utility.read_url(url)
    if response:
        # Strip newlines so the regexes can match across line breaks.
        data = response["data"].replace("\n", "")
    else:
        return "Ohnoes, nothing found."
    # Look for word: base entry span, inflection area, translation table.
    pattern = "\<span class=\"tyda_entry_base\"( title=\"[^\"]+\")?\>([^\<]+)\<\/span\>(.*?)\<\/td\>(.+?)\<\/table\>(\<table cellpadding=\"0\" cellspacing=\"0\" class=\"tyda_entry\"\>|\<script type=\"text\/javascript\"\>)"
    match = re.search(pattern, data)
    if not match:
        return "No result found, maybe you should try searching manually: " + url
    base_word = match.group(2).replace(" (", ", ").replace(")", "")
    inflected_word_data = match.group(3)
    inflected_words = []
    translation_data = match.group(4)
    translated_words = []
    # Collect inflected forms of the base word.
    pattern = "\<span class=\"tyda_entry_inflected\" title=\"[^\"]+\"\>([^\<]+)\<\/span\>"
    iterator = re.finditer(pattern, inflected_word_data)
    for match in iterator:
        inflected_words.append(match.group(1).replace(" (", ", ").replace(")", ""))
    if inflected_words:
        inflected_words = " (" + ", ".join(inflected_words) + ")"
    else:
        inflected_words = ""
    # Collect the translations themselves.
    pattern = "\<a id=\"tyda_transR\d+\" href=\"\/search\/[^\"]+\"\>([^\<]+)\<\/a\>"
    iterator = re.finditer(pattern, translation_data)
    for match in iterator:
        translated_words.append(match.group(1))
    return base_word + inflected_words + ": " + ", ".join(translated_words) + " | " + url
def run_command(self, argument):
    """Evaluate *argument* with the calculator backend.

    Returns the usage string for empty input, an error line (with a
    Google fallback link) for invalid input, and otherwise
    "lhs = rhs | search-url" run through tag adaptation.
    """
    if not argument:
        return self.USAGE
    escaped = utility.escape(argument)
    manual_search = self.URL_SEARCH % escaped
    outcome = self.calculate(escaped)
    if outcome['error']:
        return u"Invalid input :S | Google search: %s" % manual_search
    formatted = u"%s = %s | %s" % (
        outcome.get('lhs', u"?"),
        outcome.get('rhs', u"?"),
        manual_search,
    )
    return self.adapt_tags(formatted)
def imdb_search(name):
    """Search IMDb for *name* and return info for the best matching title.

    When the /find request lands directly on a title page, that page is
    used; otherwise the first title link in the result list is followed.
    Returns None if neither case applies.
    """
    url = "http://www.imdb.com/find?s=tt&q=%s" % utility.escape(name)
    data = utility.read_url(url)["data"]
    # Direct hit: the search redirected straight to "<Title> (<year>)".
    if re.search("<title>(.*?) \((\d+)\)<\/title>", data):
        return imdb_info(url)
    # Result list: follow the first /title/ link.
    hit = re.search('<a href="(\/(title)\/.*?)"', data)
    if hit:
        return imdb_info("http://www.imdb.com%s" % hit.group(1))
def google_pages(string):
    """Return Google's estimated result count for *string*, or None.

    Scrapes the ``swrnum`` parameter from the result page.
    """
    url = 'http://www.google.se/search?q=' + utility.escape(string) + '&ie=UTF-8&oe=UTF-8'
    data = utility.read_url(url)["data"]
    hit = re.search('swrnum=(\d+)">', data)
    if hit and hit.group(1):
        return int(hit.group(1), 10)
    return None
def trig_nextep(self, bot, source, target, trigger, argument):
    """Information about the latest and next episode of a TV show."""
    # Sanitize argument
    argument = utility.escape(argument.strip())
    if not argument:
        return self.usage
    # Fetch data
    info = self.fetch_tv_info(argument)
    if "Show Name" not in info:
        return "TV show not found | Manual search: " + (self.search_url % argument)
    # Show name, with premiere year appended when known.
    name = info["Show Name"]
    if "Premiered" in info:
        name += " (" + info["Premiered"] + ")"
    # The backend packs multi-part fields with "^" separators.
    last_ep = info.get("Latest Episode", "Unknown").replace("^", ", ")
    next_ep = info.get("Next Episode", "Unknown").replace("^", ", ")
    if "Status" in info:
        next_ep += " - " + info["Status"].replace("^", ", ")
    url = info.get("Show URL", self.search_url % argument)
    return "%s | Latest: %s | Next: %s | Read more: %s" % (name, last_ep, next_ep, url)
def trig_down(self, bot, source, target, trigger, argument): queriedUrl = argument.strip() if not queriedUrl: return "usage: .down http://example.url" url = 'http://downforeveryoneorjustme.com/' + utility.escape(queriedUrl) response = utility.read_url(url) data = response["data"] search = re.search(r'<div id\=\"container\">\s+(.+)<p>.+?<\/p>.+<\/div>', data, re.S) if search: message = search.group(1) message = re.sub(r'<[^>]*?>', '', message) return message else: return "No result. downforeveryoneorjustme.com might be down. Oh, the irony."
def trig_down(self, bot, source, target, trigger, argument):
    """Check whether a site is down via downforeveryoneorjustme.com."""
    queriedUrl = argument.strip()
    if not queriedUrl:
        return "usage: .down http://example.url"
    url = 'http://downforeveryoneorjustme.com/' + utility.escape(queriedUrl)
    response = utility.read_url(url)
    data = response["data"]
    # The verdict text lives at the top of the #container div.
    search = re.search(r'<div id\=\"container\">\s+(.+)<p>.+?<\/p>.+<\/div>', data, re.S)
    if search:
        message = search.group(1)
        # Strip any markup from the verdict before relaying it.
        message = re.sub(r'<[^>]*?>', '', message)
        return message
    else:
        return "No result. downforeveryoneorjustme.com might be down. Oh, the irony."
def wp_get(self, language, item):
    """Fetch the lead paragraph of a Wikipedia article.

    Returns a (url, text) tuple; text is None when the page is missing
    or its first paragraph cannot be extracted. The text is truncated at
    the last sentence boundary within 300 characters.
    """
    url = "http://%s.wikipedia.org/wiki/%s" % (language, utility.escape(item.replace(" ", "_")))
    response = utility.read_url(url)
    if not response:
        return (None, None)
    url = response["url"]
    # sometimes there is a nasty table containing the first <p>. we can't allow this to happen!
    body = re.sub(re.compile("<table.*?>.+?<\/table>", re.MULTILINE), "", response["data"])
    m = re.search("<p>(.+?)<\/p>", body)
    if not m:
        return (url, None)
    text = utility.unescape(m.group(1))
    text = re.sub("<.+?>", "", text)
    text = re.sub("\[\d+\]", "", text)  # drop footnote markers like [3]
    cut = text.rfind(".", 0, 300)
    if cut == -1:
        cut = 300
    # Keep a closing quote that immediately follows the final period.
    if cut + 1 < len(text) and text[cut + 1] == '"':
        cut += 1
    text = text[0:cut + 1]
    if "Wikipedia does not have an article with this exact name." in text:
        text = None
    return (url, text)
def wp_get(self, item):
    """Fetch the first paragraph of the English Wikipedia article for *item*.

    Returns (url, text); text is None when the article is missing or unparsable.
    """
    url = "http://en.wikipedia.org/wiki/%s" % utility.escape(item.replace(" ", "_"))
    response = utility.read_url(url)
    if not response:
        return (None, None)
    data = response["data"]
    # Use the final URL so redirects point at the real article.
    url = response["url"]
    # sometimes there is a nasty table containing the first <p>. we can't allow this to happen!
    pattern = re.compile("<table.*?>.+?<\/table>", re.MULTILINE)
    data = re.sub(pattern, "", data)
    m = re.search("<p>(.+?)<\/p>", data)
    if m:
        data = utility.unescape(m.group(1))
        data = re.sub("<.+?>", "", data)
        # Drop footnote markers like [3].
        data = re.sub("\[\d+\]", "", data)
        # Truncate at the last sentence boundary within 300 characters.
        index = data.rfind(".", 0, 300)
        if index == -1:
            index = 300
        # Keep a closing quote that immediately follows the final period.
        if index+1 < len(data) and data[index+1] == '"':
            index += 1
        data = data[0:index+1]
        if "Wikipedia does not have an article with this exact name." in data:
            data = None
    else:
        data = None
    return (url, data)
def trig_temp(self, bot, source, target, trigger, argument):
    """ Usage: .temp [City] Uses data from temperature.nu, please direct all complaints to www.temperatur.nu """
    argument = argument.strip()
    if argument:
        argument = argument.strip()
        # Remember this user's preferred place for future bare ".temp" calls.
        self.places[source] = argument
        self.save()
    else:
        # No place given: fall back to the saved place, or "ryd".
        if source in self.places:
            argument = self.places[source]
        else:
            argument = 'ryd'
    argument_text = argument
    argument = utility.asciilize(argument)
    argument = utility.escape(argument)
    # awesome hack to include avesta!
    if argument.lower() == "avesta":
        actual_argument = "fors"
    else:
        actual_argument = argument
    url = "http://www.temperatur.nu/termo/%s/temp.txt" % actual_argument.lower()
    response = utility.read_url(url)
    m = None
    if response:
        data = response["data"]
        m = _get_temp_re.match(data)
    if m and m.group(1) != "not found":
        return "Temperature in %s: %s." % (argument_text, m.group(1))
    else:
        return "Temperature in %s: invalid place, try using .yr instead." % (argument_text)
def run_command(self, query_string):
    """Summarize a TV show's latest and next episode.

    Returns the usage string for empty input, a not-found line with a
    manual-search link when the backend has no match, and otherwise
    "name (year) | Latest: … | Next: … | Read more: …".
    """
    if not query_string:
        return self.USAGE
    escaped = utility.escape(query_string)
    info = self.fetch_show_info(escaped)
    name = info.get('Show Name')
    if not name:
        return u"TV show not found | Manual search: " + self.URL_SEARCH % escaped
    # Append the premiere year unless the name already carries it.
    year = info.get('Premiered')
    if year and year not in name:
        name += u" (%s)" % year
    latest_ep = info.get('Latest Episode', u"Unknown")
    next_ep = info.get('Next Episode',
                       u"Unknown - %s" % info.get('Status', u"Unknown status"))
    url = info.get('Show URL', self.URL_SEARCH % escaped)
    return u"%s | Latest: %s | Next: %s | Read more: %s" % (name, latest_ep, next_ep, url)
cursor = conn.cursor();# get cursor to perform action #data from client is for one row (at a time) in database #request[key] -> MiniFieldStorage('topic_title','hi'); read via name, value #textbox HTML element contains 'topic_title'; represents a name-value pair #textarea HTML element contains 'topic_solution'; represents a name-value pair #for key in request:#effectively, for each data entry HTML element with name-value #works for single or multiple values, ie as single value or list of values #characters < > " are replaced with HTML entities #columns.append('"' + utility.escape(request.getlist(key)[0]) + '"'); #characters < > " are replaced with HTML entities if (key_topic_textbox_name in request) and \ (key_solution_textarea_name in request): #get and escape topic_title value topic_title = utility.escape(request.getlist(key_topic_textbox_name)[0]); #get and escape topic_solution value topic_solution = utility.escape(\ request.getlist(key_solution_textarea_name)[0]); #store topic_title,topic_solution as key,value pair in shelve file utility.write_to_shelve_file(topic_title, topic_solution); #setup text to be used in sql insert syntax, # ie in insert...values("topic_title","topic_solution"); columns.append('"' + topic_title + '"'); columns.append('"' + topic_solution + '"'); #compose sql insert syntax content = '(' + ','.join(columns) + ')'; content = 'insert into howto.topics (title,solution) values ' + content + ';'; #cursor.execute('select * from vad.contacts;');#returns number of rows found cursor.execute(content); # returns number of rows affected
# Dispatch the requested AJAX action: list titles, fetch one solution,
# or delete an entry (from both the shelve file and MySQL).
action = client_request.getlist("action")[0].strip()
actions = {}
actions[0] = "get_topic_titles_xml"
actions[1] = "get_topic_solution"
actions[2] = "delete_topic_entry"
if action == actions[0]:
    #<topics><topic><title>some title</title></topic></topics>
    result = utility.compose_titles_xml()  #contains titles
    #<topics><topic><title>..</title><solution>..</solution></topic></topics>
    #result = utility.compose_xml(); #contains title and solution
elif action == actions[1]:
    result = client_request.getlist("topic_title")[0]
    (key, result) = utility.read_from_shelve_file(utility.escape(result))
elif action == actions[2]:
    title = client_request.getlist("topic_title")[0]
    #delete from shelve file
    result = utility.remove_entry(utility.escape(title))
    #delete from MySQL; put this in separate module !
    conn = pymysql.connect(host="localhost", user="******", password="")
    cursor = conn.cursor()
    # SECURITY FIX: the SQL was previously built by string concatenation;
    # utility.escape only HTML-escapes < > " and does NOT neutralize single
    # quotes, so a title like "x' or '1'='1" could inject SQL. Use pymysql
    # parameterized queries instead. The value is still HTML-escaped so it
    # matches how rows were stored by the insert path.
    escaped_title = utility.escape(title)
    result = cursor.execute(
        "select title from howto.topics where title=%s", (escaped_title,))
    if result:
        result = cursor.execute(
            "delete from howto.topics where title=%s", (escaped_title,))
        conn.commit()
def trig_google(self, bot, source, target, trigger, argument):
    """Google *argument* and return the best special-result box.

    Tries, in order: video result, calculator, dictionary definition,
    weather, local time, and finally the first organic hit; falls back to
    the bare search URL. Branch order matters — each regex is tried only
    when the previous ones failed.
    """
    url = 'http://www.google.com/search?rls=en&q=' + utility.escape(argument) + '&ie=UTF-8&oe=UTF-8'
    response = utility.read_url(url)
    data = response["data"]
    # Flatten the page so the scraping regexes can match across lines.
    data = re.sub(r"\n|\r|\r\n", "", data)
    data = re.sub(r" +", " ", data)
    print data
    # try to extract video result
    m = re.search(r'Video results for <em>.*?<\/em>.*?<td valign=top style="padding-right:10px"><a href="(.*?)" class=l.*?>(.*?)</a><br>', data)
    if m:
        text = utility.unescape(m.group(2))
        text = re.sub('<.+?>', '', text)
        link = m.group(1)
        return "%s - %s | %s" % (text, link, url)
    # try to extract calculator result
    #m = re.search('<td><img src="\/images\/icons\/onebox\/calculator-40\.gif" ?width=40 height=40 alt=""><td> <td style="vertical-align:top" >(<h2 class=r( style="font-size:\d+%")?>)?<b>(.*?)<\/b>', data)
    m = re.search('.*?font-size:138%">(.*?)<', data)
    if m:
        answer = m.group(1)
        answer = answer.replace(' ×', '×').replace('<sup>', '^')
        answer = re.sub('<.+?>', '', answer)
        return answer
    # try to extract definition
    m = re.search('<img src="\/images\/dictblue\.gif" width=40 height=30 alt=""><td valign=top.*?>(.*?)<br>', data)
    if m:
        definition = utility.unescape(m.group(1))
        definition = re.sub('<.+?>', '', definition)
        return definition
    # try to extract weather
    m = re.search('<b>Weather<\/b> for <b>(.+?)<\/b>.+?<b>(-?\d+).*C<\/b>.+?Current: <b>(.+?)<\/b>', data)
    if m:
        location = m.group(1)
        temperature = m.group(2)
        weather = m.group(3)
        return "%s: %s - %s" % (location, temperature, weather)
    # try to extract time
    m = re.search('alt=""><td valign=middle><b>(.*?)<\/b> .+?day \((.*?)\) - <b>Time</b> in (.*?)<\/table>', data)
    if m:
        time = m.group(1)
        timezone = m.group(2)
        location = m.group(3)
        location = re.sub('<.+?>', '', location)
        return "Time in %s: %s (%s)" % (location, time, timezone)
    # try to extract first hit
    m = re.search('<li class=g><h3 class=r><a href="(.*?)".*?>(.*?)<\/a>(.*?)</div>', data)
    if m:
        text = utility.unescape(m.group(2))
        text = re.sub('<.+?>', '', text)
        link = m.group(1)
        return "%s - %s | %s" % (text, link, url)
    else:
        return url
# Dispatch the requested AJAX action: list titles, fetch one solution,
# or delete an entry (from both the shelve file and MySQL).
action = client_request.getlist("action")[0].strip()
actions = {}
actions[0] = "get_topic_titles_xml"
actions[1] = "get_topic_solution"
actions[2] = "delete_topic_entry"
if action == actions[0]:
    #<topics><topic><title>some title</title></topic></topics>
    result = utility.compose_titles_xml()  #contains titles
    #<topics><topic><title>..</title><solution>..</solution></topic></topics>
    #result = utility.compose_xml(); #contains title and solution
elif action == actions[1]:
    result = client_request.getlist("topic_title")[0]
    (key, result) = utility.read_from_shelve_file(utility.escape(result))
elif action == actions[2]:
    title = client_request.getlist("topic_title")[0]
    #delete from shelve file
    result = utility.remove_entry(utility.escape(title))
    #delete from MySQL; put this in separate module !
    conn = pymysql.connect(host="localhost", user="******", password="")
    cursor = conn.cursor()
    # SECURITY FIX: the SQL was previously built by string concatenation;
    # utility.escape only HTML-escapes < > " and does NOT neutralize single
    # quotes, leaving the statement open to SQL injection. Use pymysql
    # parameterized queries instead. The value is still HTML-escaped so it
    # matches how rows were stored by the insert path.
    escaped_title = utility.escape(title)
    result = cursor.execute(
        "select title from howto.topics where title=%s", (escaped_title,))
    if result:
        result = cursor.execute(
            "delete from howto.topics where title=%s", (escaped_title,))
        conn.commit()
def trig_alpha(self, bot, source, target, trigger, argument, network, **kwargs):
    """Query Wolfram Alpha and flatten its pods into one IRC-friendly line.

    Scrapes the HTML result page; pods delivered asynchronously are
    fetched with extra POST/GET requests and appended to the work queue.
    Rate-limited to one query per 3 seconds via self.lastrun.
    """
    # If to early
    if self.lastrun and datetime.datetime.now() - self.lastrun < datetime.timedelta(seconds=3):
        return "Alpha is not ready yet, wait a while."
    baseurl = "http://www83.wolframalpha.com/input/"
    url = baseurl + "?i=" + utility.escape(argument)
    bot.tell(network, target, "Seeking audiance with Alpha, this might take a while...")
    print "Trying to connect"
    # Ugly hack FIXME -- hard 20s deadline on the whole scrape via SIGALRM.
    import signal
    signal.alarm(20)
    response = utility.read_url(url)
    data = response["data"]
    #print data
    # Work queue of HTML fragments still to be scanned; async pod fetches
    # append to it while we iterate.
    datas = [data]
    answers = {}
    answer = None
    while datas:
        data = datas.pop(0)
        data = data.replace("\r", "")
        for line in data.split("\n"):
            # Descriptive text -- a <span> starts a new pod; stash the previous one.
            search = re.search("<span>([^<]*)</span>", line)
            if search:
                print search.group(1)
                if answer:
                    #print answer
                    if not answers.has_key(answer['desc']):
                        answers[answer['desc']] = answer
                answer = {'desc': search.group(1), 'answer': []}
            # Data! -- pod values are carried in image alt attributes.
            search = re.search('alt="([^"]*)"', line)
            if search:
                print search.group(1)
                if answer:
                    answer['answer'].append(search.group(1))
                else:
                    print "alpha: SHOULD NOT HAPPEN, " + search.group(1)
            # Download pod! -- pod content served asynchronously; POST for it.
            search = re.search("asynchronousPod\('([^']*)'[^']*'([^']*)'[^']*'([^']*)'", line)
            if search:
                posturl = search.group(1) + "&i=" + search.group(3)
                print "Posting for pod at %s ..." % posturl
                params = urllib.urlencode({"asynchronous": 'true'})
                headers = {"Host": "www83.wolframalpha.com",
                           "Connection": "close",
                           #"Via": "1.1 tinyproxy (tinyproxy/1.6.4)",
                           "Accept": "text/html, */*",
                           "Referer": "http://www83.wolframalpha.com/input/?i=22%3A00+%2B8h",
                           "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; sv-SE; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
                           "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                           #"Cookie": "WR_SID=85.229.221.66.1248096225817695; JSESSIONID=D6E0FA92ED59A69CC41C43244589B153",
                           #"Accept-Encoding": "gzip,deflate",
                           "Accept-Language": "en-us,en;q=0.7,sv;q=0.3",
                           "X-Requested-With": "XMLHttpRequest",
                           }
                conn = httplib.HTTPConnection("www83.wolframalpha.com:80")
                conn.request("POST", "/input/" + posturl, params, headers)
                response = conn.getresponse()
                #print response.status, response.reason
                d = response.read()
                #print "'" + d + "'"
                datas.append(d)
                conn.close()
            # Recalculate for moar pods
            search = re.search("recalculate\('([^']*)'\)", line)
            if search and search.group(1).strip():
                print "GETs recalculate '%s' ..." % search.group(1)
                params = urllib.urlencode({})
                headers = {"Host": "www83.wolframalpha.com",
                           "Connection": "close",
                           #"Via": "1.1 tinyproxy (tinyproxy/1.6.4)",
                           "Accept": "text/html, */*",
                           "Referer": "http://www83.wolframalpha.com/input/?i=22%3A00+%2B8h",
                           "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; sv-SE; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
                           "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                           #"Cookie": "WR_SID=85.229.221.66.1248096225817695; JSESSIONID=D6E0FA92ED59A69CC41C43244589B153",
                           #"Accept-Encoding": "gzip,deflate",
                           "Accept-Language": "en-us,en;q=0.7,sv;q=0.3",
                           "X-Requested-With": "XMLHttpRequest",
                           }
                conn = httplib.HTTPConnection("www83.wolframalpha.com:80")
                conn.request("GET", "/input/" + search.group(1), params, headers)
                response = conn.getresponse()
                #print response.status, response.reason
                datas.append(response.read())
                conn.close()
    print "Answers:" + str(answers)
    restult = ""
    input_key = ""
    # Alpha labels the echo of the query either "Input interpretation:" or "Input:".
    if answers.has_key("Input interpretation:"):
        input_key = "Input interpretation:"
    elif answers.has_key("Input:"):
        input_key = "Input:"
    else:
        return "Alpha does not understand."
    # Create result
    result = "Alpha on '%s' says " % answers[input_key]["answer"][0]
    if answers.has_key("Result:"):
        for ans in answers["Result:"]["answer"]:
            result += " " + ans
        result += "; "
    # Append the remaining pods, skipping the ones already emitted above.
    for desc, answer in answers.iteritems():
        if desc not in ("Please make a selection:", "Input interpretation:", "Result:"):
            result += desc
            for ans in answer["answer"]:
                result += " " + ans
            result += "; "
    # Compact the output for IRC: drop layout chars, shorten units.
    result = result.replace("\\n", " ")
    result = result.replace("|", "")
    result = re.sub("( ){2,}", " ", result)
    result = re.sub(" hour[s]*", "h", result)
    result = re.sub(" minute[s]*", "m", result)
    result = re.sub(" seconds[s]*", "s", result)
    result = result[:-1] + ";"
    self.lastrun = datetime.datetime.now()
    print result
    return result
def cmd_start(update, context):
    """Handle the Telegram /start command.

    Creates/loads the user, shows either the language picker (fresh users
    in FSM state '0'/'1') or the main menu, cleans up the previous bot
    message, and credits the inviter when a referral payload is present.
    """
    uid = update.message.from_user.id
    # Referral payload from a deep-link /start command, if any.
    payload = get_payload(update.message.text)
    user = create_new_user(update)
    # "absorb" message - cleaner this way
    remove_message(update, context, user)
    main_text = ''
    lang = user.settings.language
    if user.settings.fsm_state in {'0', '1'}:
        # Fresh user: show the language selection keyboard first.
        main_text += txt['SERVICE']['start'][lang]
        markup = utility.gen_keyboard(txt['LANG_NAMES'], txt['LANG_PAYLOAD'])
        user.settings.fsm_state = FSM.LANGUAGE.value
        user.save()
    else:
        # send the main menu
        # better than sending message associated with the current state, as
        # there are messages with customized keyboards, etc..
        main_text += txt['FSM']['3']['text'][lang].format(
            **user.collect_main_data()
        )
        markup = utility.gen_keyboard(
            txt['FSM']['3']['markup'][lang],
            txt['FSM']['3']['payload']
        )
        # ensure that if user is in another state the payload isn't processed
        # just a precaution...
        payload = None
    if user.settings.last_msg_id:
        # Remove (or at least blank out) the previous bot message so only
        # one interactive message exists at a time.
        try:
            context.bot.delete_message(
                chat_id=uid,
                message_id=user.settings.last_msg_id
            )
        except BadRequest:
            try:
                context.bot.edit_message_text(
                    chat_id=uid,
                    message_id=user.settings.last_msg_id,
                    text=txt['CALLBACK']['deleted'][lang],
                    parse_mode='HTML'
                )
            except BadRequest:
                # the message can be already deleted by the user
                pass
    if payload:
        try:
            inviter = User.get_user(uid=payload)
        except LookupError:
            # Unknown inviter id: silently drop the whole interaction.
            return
        inviter.add_to_invited(uid)
        # Prefix the greeting with who invited this user.
        main_text = (
            txt['SERVICE']['invited_by'][lang].format(
                user=utility.escape(context.bot.get_chat(payload).first_name),
                id=inviter.user_id
            )
            + '\n\n'
            + main_text
        )
    sent_message = context.bot.send_message(
        chat_id=uid,
        text=main_text,
        reply_markup=markup,
        parse_mode='HTML'
    )
    # bot sends a new message, so the old last_msg_id must be replaced.
    user.settings.last_msg_id = sent_message.message_id
    user.save()
def trig_alpha(self, bot, source, target, trigger, argument, network, **kwargs):
    """Query Wolfram Alpha, collecting all pods (including async ones) into one line.

    Rate-limited via self.lastrun (3 s); hard 20 s deadline via SIGALRM.
    """
    # If to early
    if self.lastrun and datetime.datetime.now() - self.lastrun < datetime.timedelta(seconds=3):
        return "Alpha is not ready yet, wait a while."
    baseurl = "http://www83.wolframalpha.com/input/"
    url = baseurl + "?i=" + utility.escape(argument)
    bot.tell(network, target, "Seeking audiance with Alpha, this might take a while...")
    print "Trying to connect"
    # Ugly hack FIXME -- abort the whole scrape after 20 seconds.
    import signal
    signal.alarm(20)
    response = utility.read_url(url)
    data = response["data"]
    #print data
    # Work queue of HTML fragments; async pod fetches append to it.
    datas = [data]
    answers = {}
    answer = None
    while datas:
        data = datas.pop(0)
        data = data.replace("\r", "")
        for line in data.split("\n"):
            # Descriptive text -- a <span> starts a new pod; stash the previous one.
            search = re.search("<span>([^<]*)</span>", line)
            if search:
                print search.group(1)
                if answer:
                    #print answer
                    if not answers.has_key(answer['desc']):
                        answers[answer['desc']] = answer
                answer = {'desc': search.group(1), 'answer': []}
            # Data! -- pod values are carried in image alt attributes.
            search = re.search('alt="([^"]*)"', line)
            if search:
                print search.group(1)
                if answer:
                    answer['answer'].append(search.group(1))
                else:
                    print "alpha: SHOULD NOT HAPPEN, " + search.group(1)
            # Download pod! -- pod content served asynchronously; POST for it.
            search = re.search(
                "asynchronousPod\('([^']*)'[^']*'([^']*)'[^']*'([^']*)'", line)
            if search:
                posturl = search.group(1) + "&i=" + search.group(3)
                print "Posting for pod at %s ..." % posturl
                params = urllib.urlencode({"asynchronous": 'true'})
                headers = {
                    "Host": "www83.wolframalpha.com",
                    "Connection": "close",
                    #"Via": "1.1 tinyproxy (tinyproxy/1.6.4)",
                    "Accept": "text/html, */*",
                    "Referer": "http://www83.wolframalpha.com/input/?i=22%3A00+%2B8h",
                    "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; sv-SE; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
                    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                    #"Cookie": "WR_SID=85.229.221.66.1248096225817695; JSESSIONID=D6E0FA92ED59A69CC41C43244589B153",
                    #"Accept-Encoding": "gzip,deflate",
                    "Accept-Language": "en-us,en;q=0.7,sv;q=0.3",
                    "X-Requested-With": "XMLHttpRequest",
                }
                conn = httplib.HTTPConnection("www83.wolframalpha.com:80")
                conn.request("POST", "/input/" + posturl, params, headers)
                response = conn.getresponse()
                #print response.status, response.reason
                d = response.read()
                #print "'" + d + "'"
                datas.append(d)
                conn.close()
            # Recalculate for moar pods
            search = re.search("recalculate\('([^']*)'\)", line)
            if search and search.group(1).strip():
                print "GETs recalculate '%s' ..." % search.group(1)
                params = urllib.urlencode({})
                headers = {
                    "Host": "www83.wolframalpha.com",
                    "Connection": "close",
                    #"Via": "1.1 tinyproxy (tinyproxy/1.6.4)",
                    "Accept": "text/html, */*",
                    "Referer": "http://www83.wolframalpha.com/input/?i=22%3A00+%2B8h",
                    "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; sv-SE; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
                    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                    #"Cookie": "WR_SID=85.229.221.66.1248096225817695; JSESSIONID=D6E0FA92ED59A69CC41C43244589B153",
                    #"Accept-Encoding": "gzip,deflate",
                    "Accept-Language": "en-us,en;q=0.7,sv;q=0.3",
                    "X-Requested-With": "XMLHttpRequest",
                }
                conn = httplib.HTTPConnection("www83.wolframalpha.com:80")
                conn.request("GET", "/input/" + search.group(1), params, headers)
                response = conn.getresponse()
                #print response.status, response.reason
                datas.append(response.read())
                conn.close()
    print "Answers:" + str(answers)
    restult = ""
    input_key = ""
    # Alpha labels the echo of the query either "Input interpretation:" or "Input:".
    if answers.has_key("Input interpretation:"):
        input_key = "Input interpretation:"
    elif answers.has_key("Input:"):
        input_key = "Input:"
    else:
        return "Alpha does not understand."
    # Create result
    result = "Alpha on '%s' says " % answers[input_key]["answer"][0]
    if answers.has_key("Result:"):
        for ans in answers["Result:"]["answer"]:
            result += " " + ans
        result += "; "
    # Append the remaining pods, skipping the ones already emitted above.
    for desc, answer in answers.iteritems():
        if desc not in ("Please make a selection:", "Input interpretation:", "Result:"):
            result += desc
            for ans in answer["answer"]:
                result += " " + ans
            result += "; "
    # Compact the output for IRC: drop layout chars, shorten units.
    result = result.replace("\\n", " ")
    result = result.replace("|", "")
    result = re.sub("( ){2,}", " ", result)
    result = re.sub(" hour[s]*", "h", result)
    result = re.sub(" minute[s]*", "m", result)
    result = re.sub(" seconds[s]*", "s", result)
    result = result[:-1] + ";"
    self.lastrun = datetime.datetime.now()
    print result
    return result
# get cursor to perform action #data from client is for one row (at a time) in database #request[key] -> MiniFieldStorage('topic_title','hi'); read via name, value #textbox HTML element contains 'topic_title'; represents a name-value pair #textarea HTML element contains 'topic_solution'; represents a name-value pair #for key in request:#effectively, for each data entry HTML element with name-value #works for single or multiple values, ie as single value or list of values #characters < > " are replaced with HTML entities #columns.append('"' + utility.escape(request.getlist(key)[0]) + '"'); #characters < > " are replaced with HTML entities if (key_topic_textbox_name in request) and \ (key_solution_textarea_name in request): #get and escape topic_title value topic_title = utility.escape( request.getlist(key_topic_textbox_name)[0]) #get and escape topic_solution value topic_solution = utility.escape(\ request.getlist(key_solution_textarea_name)[0]) #store topic_title,topic_solution as key,value pair in shelve file utility.write_to_shelve_file(topic_title, topic_solution) #setup text to be used in sql insert syntax, # ie in insert...values("topic_title","topic_solution"); columns.append('"' + topic_title + '"') columns.append('"' + topic_solution + '"') #compose sql insert syntax content = '(' + ','.join(columns) + ')' content = 'insert into howto.topics (title,solution) values ' + content + ';' #cursor.execute('select * from vad.contacts;');#returns number of rows found cursor.execute(content)
def trig_google(self, bot, source, target, trigger, argument):
    """Google the argument and report the most interesting result.

    Fetches the result page, then probes it for Google's special answer
    boxes in priority order — video, calculator, dictionary definition,
    weather, local time — and falls back to the first ordinary hit, or to
    the bare search URL when nothing at all can be scraped out.
    """
    url = 'http://www.google.com/search?rls=en&q=' + utility.escape(argument) + '&ie=UTF-8&oe=UTF-8'
    data = utility.read_url(url)["data"]

    # Video answer box?
    hit = re.search(r'Video results for <em>.*?<\/em>.*?<td valign=top style="padding-right:10px"><a href="(.*?)" class=l.*?>(.*?)</a><br>', data)
    if hit:
        title = re.sub('<.+?>', '', utility.unescape(hit.group(2)))
        return "%s - %s | %s" % (title, hit.group(1), url)

    # Calculator answer box?
    hit = re.search('<td><img src=\/images\/calc_img\.gif width=40 height=30 alt=""><td> <td nowrap (dir=ltr)?>(<h2 class=r( style="font-size:\d+%")?>)?<b>(.*?)<\/b>', data)
    if hit:
        expression = hit.group(4).replace(' ×', '\xd7').replace('<sup>', '^')
        return re.sub('<.+?>', '', expression)

    # Dictionary definition box?
    hit = re.search('<img src="\/images\/dictblue\.gif" width=40 height=30 alt=""><td valign=top.*?>(.*?)<br>', data)
    if hit:
        return re.sub('<.+?>', '', utility.unescape(hit.group(1)))

    # Weather box?
    hit = re.search('<b>Weather<\/b> for <b>(.+?)<\/b>.+?<b>(-?\d+).*C<\/b>.+?Current: <b>(.+?)<\/b>', data)
    if hit:
        place, degrees, conditions = hit.group(1), hit.group(2), hit.group(3)
        return "%s: %s - %s" % (place, degrees, conditions)

    # Local-time box?
    hit = re.search('alt=""><td valign=middle><b>(.*?)<\/b> .+?day \((.*?)\) - <b>Time</b> in (.*?)<\/table>', data)
    if hit:
        clock, zone, place = hit.group(1), hit.group(2), hit.group(3)
        place = re.sub('<.+?>', '', place)
        return "Time in %s: %s (%s)" % (place, clock, zone)

    # Plain first hit, or just hand back the search URL.
    hit = re.search('<li class=g><h3 class=r><a href="(.*?)".*?>(.*?)<\/a>(.*?)</div>', data)
    if hit:
        title = re.sub('<.+?>', '', utility.unescape(hit.group(2)))
        return "%s - %s | %s" % (title, hit.group(1), url)
    return url
def _manual_search_url(self, search_term):
    """Return the manual-search URL with *search_term* escaped into it."""
    escaped_term = utility.escape(search_term)
    return self.URL_MANUAL_SEARCH % escaped_term
def posten_kolli_query(kolli_id):
    """Look up a parcel id on posten.se and return a one-line status summary.

    Returns None when the tracking page cannot be parsed, or when the
    parsed entry is missing a date or status.
    """
    url = 'http://posten.se/tracktrace/TrackConsignments_do.jsp?trackntraceAction=saveSearch&consignmentId=' + utility.escape(kolli_id)
    page = utility.read_url(url)["data"]

    match = re.search('(?ims)<dt>Från:</dt><dd>(.*?)</dd>.*?rightcol.*h2>.*<h3>(.*?)</h3>\s*?(.*?)(<br/>|<div).*?<dt>Vikt:</dt><dd>(.*?)</dd>', page)
    if not match:
        return None

    sender, date, status, weight = match.group(1), match.group(2), match.group(3), match.group(5)
    if not (date and status):
        return None

    # Strip any markup left inside the status text before reporting.
    return "%s fr\xe5n %s | %s: %s | %s" % (weight, sender, date, re.sub("<.+?>", "", status), url)