def fetchProfile(self, search_name):
    """Resolve *search_name* via quickfind.kassad.in's lookup API.

    Scrapes the profile page for the three anti-scraping tokens (pbk, api,
    diamondprox) the lookup endpoint requires, then queries it.

    Returns a namedtuple (summoner_name, pbk, diamondprox, api), or None when
    any token is missing or the lookup response is not valid JSON.
    """
    search_url = "http://quickfind.kassad.in/profile/na/%s/" % urllib.quote(search_name)
    search_html = getWebpage(search_url)

    # All three tokens are required; bail out if any is absent.
    tokens = {}
    for key, regex in (('pbk', self.pbk_re),
                       ('api', self.api_re),
                       ('diamondprox', self.diamondprox_re)):
        r = regex.search(search_html)
        if not r:
            return None
        tokens[key] = r.groupdict()[key]

    # FIX: the query string previously contained a mangled "®ionProxy"
    # parameter (an HTML-entity-collapsed "&regionProxy") and did not
    # URL-quote the summoner name.
    lookup_url = ("http://quickfind.kassad.in/lookup?diamondprox=%s"
                  "&REQUIRED_QUICKFIND_API_KEY=%s&PBK=%s&regionProxy=na&summoner=%s"
                  % (tokens['diamondprox'], tokens['api'], tokens['pbk'],
                     urllib.quote(search_name)))
    lookup_json = getWebpage(lookup_url)
    try:
        lookup_data = json.loads(lookup_json)
    except ValueError:  # not JSON -> treat as a failed lookup
        return None

    summoner_name = lookup_data['name']
    response = collections.namedtuple('Response', ['summoner_name', 'pbk', 'diamondprox', 'api'])
    return response(summoner_name, tokens['pbk'], tokens['diamondprox'], tokens['api'])
def getProfileHtml(self, summoner_id, summoner_name):
    """Trigger an op.gg server-side stats refresh, then return the profile HTML."""
    # Ask op.gg to refresh the summoner's stats before we fetch the page.
    getWebpage("http://na.op.gg/summoner/ajax/update.json/summonerId=" + summoner_id)
    profile_html = getWebpage(self.getProfileUrl(summoner_name))
    # Parsing the name/id out of the page doubles as markup validation.
    self.getNameAndIdFromProfile(profile_html)
    return profile_html
def processCommand(self, message, trigger, arguments):
    """Report which champions counter, and are countered by, the given champion."""
    assert isinstance(message, Message.Message)
    assert isinstance(self._chatbot, ChatBot)

    champ_query = arguments.strip()
    if not champ_query:
        return "Syntax: " + self.syntax

    page = getWebpage("http://www.championselect.net/champ/" + champ_query)
    match = self.re_champ.search(page)
    if not match:
        return "Sorry, I couldn't find info for '%s'" % champ_query
    champ_name = match.groupdict()['champ']

    # Counters are listed before the "is strong against" header,
    # favorable matchups after it.
    halves = page.split('is strong against</h2>', 1)
    weak_list = self.re_counter.findall(halves[0])
    strong_list = self.re_counter.findall(halves[1])

    return "%s is strong against %s, but weak against %s" % (
        champ_name, ", ".join(strong_list[:5]), ", ".join(weak_list[:5]))
def getLastMatch(self, skip_num=0):
    """Return SummonerMatchStats for the most recent match (or *skip_num*
    matches back in the history), or None when the data cannot be
    fetched or parsed.
    """
    # FIX: the query string previously contained a mangled "®ionProxy"
    # parameter (an HTML-entity-collapsed "&regionProxy") and did not
    # URL-quote the summoner name.
    match_url = ("http://quickfind.kassad.in/lookup/match?diamondprox=%s"
                 "&REQUIRED_QUICKFIND_API_KEY=%s&PBK=%s&regionProxy=na&summoner=%s"
                 % (self._diamondprox, self._api, self._pbk,
                    urllib.quote(self._summoner_name)))
    match_json = getWebpage(match_url)
    try:
        match_data = json.loads(match_json)
    except ValueError:  # not JSON -> treat as a failed lookup
        return None

    match_html = match_data['escaped_html']
    matches = [m.groupdict() for m in self.lastmatch_re.finditer(match_html)]
    if len(matches) <= skip_num:
        return None

    d = matches[skip_num]
    champion_name = d['champion']
    win = d['win_or_loss'] == 'WIN'
    game_type = d['map_and_queue']
    # Match times are reported in Pacific time; pin the offset explicitly.
    how_long_ago = self.pretty_date(parse(d['match_time'] + '-0700'))
    kda = d['kda'].split("/")
    kills = kda[0]
    deaths = kda[1]
    assists = kda[2]
    cs = d['cs']
    gold = d['gold']
    duration = None  # not available from this endpoint
    return SummonerMatchStats("kassad.in", self._summoner_name, champion_name,
                              win, game_type, kills, deaths, assists, cs,
                              gold, duration, how_long_ago)
def MWDefine(cls, word):
    """Best-effort scrape of the first Merriam-Webster sense of *word*.

    Returns [True, definition] on success (definition truncated to 150
    chars), [False, ""] otherwise.
    """
    try:
        page = getWebpage("http://www.merriam-webster.com/dictionary/%s" % urllib.quote(word))
    except:  # deliberate best-effort: any fetch failure -> empty page
        page = ""
    #<b>1
    usable = True
    try:
        # Isolate the first sense block; a nested sense marker means the
        # first sense ends there.
        sense = page.split('<span class="ssens">', 1)[1]
        sense = sense.split('</span></div>')[0]
        if sense.find('<span class="ssens">') >= 0:
            sense = sense.split('<span class="ssens">')[0]
        usable = len(sense) > 1
    except:  # markup not in the expected shape
        usable = False
    if not usable:
        return [False, ""]
    text = stripHTML(sense).strip()
    if text.find(": ") == 0:
        text = text[2:]
    if len(text) > 150:
        text = text[:150] + "..."
    return [True, text]
def getSummonerNameAndId(self, summoner_name):
    """Resolve a summoner name to (canonical_name, summoner_id).

    Results are memoized in the class-level summoner_id_cache.
    """
    cache = OpggSummoner.summoner_id_cache
    if summoner_name in cache:
        return summoner_name, cache[summoner_name]
    profile_html = getWebpage(self.getProfileUrl(summoner_name))
    # The profile page yields the canonical capitalization of the name.
    summoner_name, summoner_id = self.getNameAndIdFromProfile(profile_html)
    cache[summoner_name] = summoner_id
    return summoner_name, summoner_id
def processCommand(self, message, trigger, arguments):
    """Look up a champion skill (passive/Q/W/E/R) on the official game-info site.

    Expects arguments of the form "<champion>, <skill label>".
    """
    assert isinstance(message, Message.Message)
    assert isinstance(self._chatbot, ChatBot)
    arguments = arguments.strip()
    arg_parts = arguments.split(',')
    if len(arg_parts) != 2:
        return "Syntax: " + self.syntax
    skill = arg_parts[1].strip().upper()
    if not skill in self.skill_map:
        return "Unknown skill label '%s'. Please specify passive, Q, W, E, or R" % skill
    skill_seq = self.skill_map[skill]
    champion_name = self.formatChampName(arg_parts[0].strip())
    if champion_name is None:
        return "Unknown champion: " + arg_parts[0]
    url = "http://gameinfo.na.leagueoflegends.com/en/game-info/champions/" + champion_name + '/'
    data = getWebpage(url)
    soup = BeautifulSoup(data)
    champname_node = soup.select('#champ_header h1')
    if not champname_node or len(champname_node) == 0:
        return "Sorry, I couldn't find info for champion '%s'" % champion_name
    formatted_champ_name = champname_node[0].string
    passive_div = soup.select('#spell-passive')
    if not passive_div or len(passive_div) == 0:
        return "Sorry, I couldn't find info on skill '%s' for %s" % (skill, formatted_champ_name)
    # The passive div sits two levels inside the container that holds all
    # five skill blocks, in passive/Q/W/E/R order.
    skills_container = passive_div[0].parent.parent
    skill_nodes = skills_container.select(".section-wrapper-content-wrapper > div.gs-container")
    if not skill_nodes or len(skill_nodes) <= skill_seq:
        return "Sorry, I couldn't parse the info on skill '%s' for %s" % (skill, formatted_champ_name)
    try:
        skill_node = skill_nodes[skill_seq]
        skill_name = self.requiredSelect(skill_node, 'h3').string
        skill_des = self.flattenRequiredSelect(skill_node, 'p')
        skill_des = skill_des.replace("Range: ", " Range: ")
        # Collapse runs of whitespace into line breaks for chat output.
        skill_des = re.sub(r'\s{2,}', '\n', skill_des)
    except Exception:
        return "Sorry, I couldn't parse the info on skill '%s' for %s" % (skill, formatted_champ_name)
    ret = "%s (%s) for %s:\n%s" % (skill_name, skill, formatted_champ_name, skill_des)
    # FIX: re.sub returns a new string; the original discarded this result,
    # so consecutive blank lines were never collapsed.
    ret = re.sub("\n\n+", "\n", ret)
    return ret
def getWOEID(self, location):
    """Look up the Yahoo WOEID for a free-text location; None when unknown."""
    if location is None:
        return None
    query_url = "http://where.yahooapis.com/v1/places.q('%s')?appid=%s" % (
        urllib.quote(location), self._chatbot._config['yahoo_appid'])
    body = getWebpage(query_url)
    match = self.woe_re.search(body)
    if match:
        return match.groupdict()['woeid']
    return None
def getDivision(self):
    """Return the summoner's season-3 ranked division string, e.g.
    "Gold II (42 LP)", or None when the lookup fails.
    """
    # FIX: the query string previously contained a mangled "®ionProxy"
    # parameter (an HTML-entity-collapsed "&regionProxy") and did not
    # URL-quote the summoner name.
    season_url = ("http://quickfind.kassad.in/lookup/season3?diamondprox=%s"
                  "&REQUIRED_QUICKFIND_API_KEY=%s&PBK=%s&regionProxy=na&summoner=%s"
                  % (self._diamondprox, self._api, self._pbk,
                     urllib.quote(self._summoner_name)))
    season_json = getWebpage(season_url)
    try:
        season_data = json.loads(season_json)
    except ValueError:  # not JSON -> treat as a failed lookup
        return None
    # 'right' holds "<tier icon html><br> Division Name".
    division = season_data['right'].split('<br> ')[1]
    lp = ''
    # 'left' holds "NN LP, ..." when the summoner has league points.
    if " LP," in season_data['left']:
        lp = " (%s LP)" % season_data['left'].split(' LP,')[0]
    return division + lp
def MWDefine(cls, word, key):
    """Define *word* via the Merriam-Webster collegiate XML API.

    Returns [True, definition] (truncated to 150 chars) on success,
    [False, ""] on any network or parse failure.
    """
    try:
        url = "http://www.dictionaryapi.com/api/v1/references/collegiate/xml/%s?key=%s" % (
            urllib.quote(word), key)
        response_xml = getWebpage(url)
        # FIX: removed leftover debug `print c` that dumped the entire
        # HTTP response body to stdout on every lookup.
        root = et.fromstring(response_xml)
        # The first <dt> element holds the primary definition text.
        answer = "".join(root.findall('.//dt')[0].itertext())
        if answer.find(":") == 0:
            answer = answer[1:]
        answer = answer.strip()
        if len(answer) > 150:
            answer = answer[:150] + "..."
        return [True, answer]
    except Exception:
        # Deliberate best-effort: log the failure, report "no definition".
        print(traceback.format_exc())
        return [False, ""]
def loadItems(self, url):
    """Populate self.boots / self.non_boots from a Riot item-data JSON dump.

    Boots must be upgraded (built from something); non-boots must be both
    built from something and a final item (build into nothing).
    """
    self.boots = []
    self.non_boots = []
    data = json.loads(getWebpage(url))
    items = data["data"]
    # hardcoded (yolo) list of items not usable in ABAM
    # this is probably because they are not allowed in that game/map type
    self.item_blacklist = [
        "The Lightbringer", "Wriggle's Lantern", "Spirit of the Ancient Golem",
        "Spirit of the Elder Lizard", "Spirit of the Spectral Wraith",
        "Ruby Sightstone", "Ohmwrecker", "The Bloodthirster", "Blackfire Torch",
        "Grez's Spectral Lantern", "Odyn's Veil", "game_item_displayname_2051",
        "Sword of the Occult", "Mejai's Soulstealer", "Guardian Angel",
        "Overlord's Bloodmail", "Wooglet's Witchcap",
    ]
    for item in items.itervalues():
        name = item["name"]
        if name in self.item_blacklist:
            continue
        if "Enchantment: " in name or "Augment: " in name:
            continue
        has_recipe = "from" in item and len(item["from"]) > 0
        is_boots = "Boots" in item["tags"]
        if is_boots and has_recipe:
            self.boots.append(name)
        elif (not is_boots and has_recipe
              and "into" in item and len(item["into"]) == 0):
            self.non_boots.append(name)
def getWeather(self, woeid):
    """Fetch current conditions for a WOEID from the Yahoo Weather RSS feed.

    Returns (location_string, conditions_string), or None on any failure.
    """
    if woeid is None:
        return None
    WEATHER_NS = 'http://xml.weather.yahoo.com/ns/rss/1.0'
    url = "http://weather.yahooapis.com/forecastrss?w=%s" % woeid
    condition = None
    location = None
    forecasts = []
    try:
        dom = minidom.parseString(getWebpage(url))
        for node in dom.getElementsByTagNameNS(WEATHER_NS, 'forecast'):
            forecasts.append({
                'date': node.getAttribute('date'),
                'low': node.getAttribute('low'),
                'high': node.getAttribute('high'),
                'condition': node.getAttribute('text'),
            })
        loc_node = dom.getElementsByTagNameNS(WEATHER_NS, 'location')[0]
        location = {
            'city': loc_node.getAttribute('city'),
            'region': loc_node.getAttribute('region'),
        }
        cond_node = dom.getElementsByTagNameNS(WEATHER_NS, 'condition')[0]
        condition = {
            'current_condition': cond_node.getAttribute('text'),
            'current_temp': cond_node.getAttribute('temp'),
        }
    except:  # deliberate best-effort: any fetch/parse error -> no data
        condition = None
    if condition is None:
        return None
    if location is None:
        str_location = "WOEID %s" % woeid
    else:
        str_location = "%s, %s" % (location['city'], location['region'])
    return (str_location,
            "%s and %s degrees" % (condition['current_condition'],
                                   condition['current_temp']))
def UrbanDefine(cls, word):
    """Scrape the first Urban Dictionary definition of *word* (old markup).

    Returns [True, definition] (truncated to 300 chars), or [False, ""].
    """
    try:
        page = getWebpage("http://www.urbandictionary.com/define.php?term=%s" % urllib.quote(word))
    except:  # deliberate best-effort: any fetch failure -> empty page
        page = ""
    ud_def_regex = """<div class="definition">(.+?)</div>"""
    found = re.findall(ud_def_regex, page, re.MULTILINE | re.DOTALL)
    if not found:
        return [False, ""]
    text = stripHTML(found[0])
    text = text.replace('\n', ' ')
    # NOTE(review): the next three replace calls appear garbled in the
    # source (both arguments render as a single space, making them no-ops);
    # likely originally '  ' -> ' '. Preserved as-is — confirm against VCS.
    text = text.replace(' ', ' ')
    text = text.replace(' ', ' ')
    text = text.replace(' ', ' ')
    if len(text) > 300:
        text = text[:300] + "..."
    return [True, text]
def UrbanDefine(cls, word):
    """Scrape the first Urban Dictionary definition of *word* (newer markup).

    Returns [True, word_as_listed, definition] (definition truncated to
    300 chars), or [False, None, None].
    """
    try:
        page = getWebpage("http://www.urbandictionary.com/define.php?term=%s" % urllib.quote(word))
    except:  # deliberate best-effort: any fetch failure -> empty page
        page = ""
    ud_def_regex = """<div class='word'>.*?<a .*?>(.+?)</a>.*?<div class='meaning'>(.+?)</div>"""
    found = re.findall(ud_def_regex, page, re.MULTILINE | re.DOTALL)
    if not found:
        return [False, None, None]
    word_result = stripHTML(found[0][0])
    def_result = stripHTML(found[0][1])
    def_result = def_result.replace('\n', ' ')
    # NOTE(review): the next three replace calls appear garbled in the
    # source (both arguments render as a single space, making them no-ops);
    # likely originally '  ' -> ' '. Preserved as-is — confirm against VCS.
    def_result = def_result.replace(' ', ' ')
    def_result = def_result.replace(' ', ' ')
    def_result = def_result.replace(' ', ' ')
    if len(def_result) > 300:
        def_result = def_result[:300] + "..."
    return [True, word_result, def_result]
def GoogleDefine(cls, word):
    """Scrape Google's define: results for *word*.

    Returns [True, definition] (truncated to 150 chars), or [False, ""]
    when the page has no definition list item.
    """
    closing_string = "</li>"
    starting_string = """<li style="list-style-type:decimal">"""
    try:
        c = getWebpage("http://www.google.com/search?hl=en&lr=&safe=off&c2coff=1&q=%s&btnG=Search"
                       % urllib.quote("define:\"" + word + "\""))
    except:  # deliberate best-effort: any fetch failure -> empty page
        c = ""
    c = c.replace("\r", "").replace("\n", "")
    if c.find(starting_string) < 0:
        return [False, ""]
    c = c.split(starting_string, 1)[1]
    if c.find(closing_string) < 0:
        return [False, ""]
    result = c.split(closing_string)[0].strip()
    # FIX: the original tested `if(result.find("<br>")):`, which is truthy
    # for -1 (not found) and falsy when "<br>" is at index 0 — inverted at
    # both edges of the intended "contains" check.
    if "<br>" in result:
        result = result.split("<br>")[0]
    if len(result) > 150:
        result = result[:150] + "..."
    result = stripHTML(result)
    return [True, result]
def processCommand(self, message, trigger, arguments):
    """Look up a movie on Rotten Tomatoes and report its freshness score,
    average rating, and critic consensus.
    """
    assert isinstance(message, Message.Message)
    assert isinstance(self._chatbot, ChatBot)
    args = arguments.strip()
    if args == "":
        return "You must provide a movie name to get a review for."
    rotten = getWebpage("http://www.rottentomatoes.com/search/search.php?searchby=movies&search=%s"
                        % urllib.quote(args))
    rotten = rotten.replace("\n", "").replace("\r", "")
    if rotten.find("Search Results for :") > -1:
        # Landed on a search-results page: follow the first movie link.
        rotten_regex0 = '<ul id="movie_results_ul".*?<a target="_top" href="/m/(?P<url>[^"]+)'
        redirect = re.findall(rotten_regex0, rotten, re.MULTILINE)
        if len(redirect) >= 1:
            rotten = getWebpage("http://www.rottentomatoes.com/m/%s/" % redirect[0])
            rotten = rotten.replace("\n", "").replace("\r", "")
    rotten_regex1 = """<span itemprop="ratingValue" id="all-critics-meter" class="meter certified numeric ">(\d+)</span>"""
    review = re.findall(rotten_regex1, rotten, re.MULTILINE)
    found_freshness = len(review) == 1
    freshness = review[0] if found_freshness else ""
    rotten_regex2 = """Average Rating: <span>([^<]+)</span><br />"""
    review = re.findall(rotten_regex2, rotten, re.MULTILINE)
    found_avg_rating = len(review) > 0
    avg_rating = review[0] if found_avg_rating else ""
    rotten_regexReview = """<p class="critic_consensus">(.+?)</p>"""
    review = re.findall(rotten_regexReview, rotten, re.MULTILINE)
    found_critic_consensus = len(review) > 0
    critic_consensus = review[0] if found_critic_consensus else ""
    if not (found_freshness or found_avg_rating):
        return "I haven't seen '%s'." % args
    # Prefer the page's own title; fall back to the user's query.
    rotten_regex3 = """<title>([^<]+) - Rotten Tomatoes</title>"""
    review = re.findall(rotten_regex3, rotten, re.MULTILINE)
    if len(review) != 1:
        movie_title = args
    else:
        movie_title = review[0].split(" - ")[0]
    # FIX: repaired the user-facing typos "recieved" -> "received" and
    # "rottentomatos.com" -> "rottentomatoes.com"; also rejoined the format
    # string that was broken across two source lines.
    response = ""
    if found_freshness and found_avg_rating:
        response += ("'%s' received a freshness score of %s%% from rottentomatoes.com "
                     "with an average rating of %s." % (movie_title, freshness, avg_rating.strip()))
    elif found_avg_rating:
        response += "'%s' received an average rating of %s." % (movie_title, avg_rating.strip())
    elif found_freshness:
        response += "'%s' received a freshness score of %s%% from rottentomatoes.com." % (movie_title, freshness)
    if found_critic_consensus:
        if len(response) > 0:
            response = response + "\n"
        response = response + "\"%s\"" % (stripHTML(critic_consensus))
    return response