def show(self, bugId):
    assert bugId.isdigit(), "bug id has to be a number"
    html = geturl2(self.show_url(bugId))
    data = {}
    stat = ''
    for line in html.splitlines():
        line = line.strip()
        if not line: continue
        elif '<td headers="category">' in line: stat = 'category'
        elif '<td headers="status">' in line: stat = 'status'
        elif '<td headers="assignedto">' in line: stat = 'assigned to'
        elif '<td headers="os">' in line: data['os'] = striphtml(line).strip()
        elif '<td headers="severity">' in line: data['severity'] = striphtml(line).strip()
        elif '<td headers="priority">' in line: data['priority'] = striphtml(line).strip()
        elif '<td headers="reportedver">' in line: data['version'] = striphtml(line).strip()
        elif '<h2 class="summary' in line: stat = 'summary'
        elif '<a href="#comments">Comments (' in line: data['comments'] = line.split('(', 1)[1].split(')')[0]
        # stats
        elif stat:
            if stat in ['category', 'status', 'assigned to', 'summary']:
                data[stat] = striphtml(line).strip()
            stat = ''
    return data

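# Standalone sketch of the comment-count extraction used in show() above, run
# on a made-up line of tracker HTML (not taken from a real page):
line = '<a href="#comments">Comments (7)</a>'
print(line.split('(', 1)[1].split(')')[0])   # -> 7
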
def handle_rc(bot, event):
    """ arguments: <file>|<url> - execute a .tl resource file with bot commands. """
    if not event.rest: event.missing("<file>|<url>") ; return
    if not getmainconfig().allowrc: event.reply("rc mode is not enabled") ; return
    teller = 0
    t = event.rest
    waiting = []
    try:
        try:
            if getmainconfig().allowremoterc and t.startswith("http"): data = geturl2(t)
            else: data = open(t, 'r').read()
        except IOError as ex: event.reply("I/O error: %s" % str(ex)) ; return
        if not data: event.reply("can't get data from %s" % event.rest) ; return
        for d in data.split("\n"):
            i = d.strip()
            if not i: continue
            if i.startswith("#"): continue
            e = cpy(event)
            e.txt = "%s" % i.strip()
            e.direct = True
            bot.put(e)
            waiting.append(e)
            teller += 1
        event.reply("%s commands executed" % teller)
    except Exception as ex: event.reply("an error occurred: %s" % str(ex)) ; handle_exception()

def show(self, bugId):
    assert bugId.isdigit(), "bug id has to be a number"
    tsv = geturl2(self.show_url(bugId) + '?format=tab').splitlines()
    keys = [x.strip() for x in tsv[0].split()]
    part = tsv[1].split('\t')
    data = dict(list(zip(keys, part)))
    return data

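# Minimal sketch of the header/row zip used in show() above, run on a made-up
# tab export (the real column names depend on the tracker):
sample = "id\tsummary\tstatus\n42\tcrash on start\topen"
rows = sample.splitlines()
keys = [x.strip() for x in rows[0].split()]
print(dict(zip(keys, rows[1].split('\t'))))   # -> {'id': '42', 'summary': 'crash on start', 'status': 'open'}
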
def api(self, mount, size=30, options={}):
    url = 'http://api.stackoverflow.com/1.0%s/%s?body=true&pagesize=%s' % (mount, urllib.parse.urlencode(options), size)
    if self.api_key is not None: url += '&key=%s' % self.api_key
    content = geturl2(url, timeout=15, dobytes=True)
    if not content: return None
    zipped = gzip.GzipFile(fileobj=content).read()
    return str(zipped, "utf-8")

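# Hedged sketch of the gzip step in api() above: assuming the response body is
# raw bytes, it has to be wrapped in a file-like object before GzipFile can
# read from it (geturl2(..., dobytes=True) is assumed to hand back something
# file-like already).
import gzip, io
raw = gzip.compress(b'{"items": []}')   # stand-in for a gzipped API response
print(str(gzip.GzipFile(fileobj=io.BytesIO(raw)).read(), "utf-8"))   # -> {"items": []}
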
def list(self):
    csv = geturl2(self.list_url_summary())
    num = 0
    for line in csv.splitlines():
        try: num += int(line.split(',')[1])
        except ValueError: pass
    bugs = []
    if num > 100: bugs.append('showing 100-%d' % num)
    csv = geturl2(self.list_url())
    for line in csv.splitlines()[1:]:
        part = line.split(',')
        bugs.append('%s (%s)' % (part[0], part[1].replace('"', '')))
        if len(bugs) > 100: break
    return bugs

def getplus(target):
    credentials = import_byfile("credentials", getdatadir() + os.sep + "config" + os.sep + "credentials.py")
    url = "https://www.googleapis.com/plus/v1/people/%s/activities/public?alt=json&pp=1&key=%s" % (target, credentials.googleclient_apikey)
    result = geturl2(url)
    data = json.loads(result)
    res = []
    for item in data['items']:
        i = LazyDict(item)
        res.append("%s - %s - %s" % (i.actor['displayName'], i['title'], item['url']))
    return res

def cloneurl(self, url, auth):
    """ add feeds from remote url. """
    data = geturl2(url)
    got = []
    for line in data.split('\n'):
        try: (name, url) = line.split()
        except ValueError:
            logging.debug("can't split line: %s" % line)
            continue
        if url.endswith('<br>'): url = url[:-4]
        self.add(name, url, auth)
        got.append(name)
    return got

def handle_gcalc(bot, ievent):
    """ arguments: <expression> - use google calc. """
    if len(ievent.args) > 0: expr = " ".join(ievent.args).replace("+", "%2B").replace(" ", "+")
    else: ievent.missing('<expression>') ; return
    #req = urllib.request.Request(url % expr, None, {'User-agent': useragent()})
    data = geturl2(url % expr)
    try:
        rhs = data.split("rhs")[1].split("\"")[1]
        lhs = data.split("lhs")[1].split("\"")[1]
        if rhs and lhs: ievent.reply("%s = %s" % (lhs, rhs.replace('\\x26#215;', '*').replace('\\x3csup\\x3e', '**').replace('\\x3c/sup\\x3e', '')))
        else: ievent.reply("hmmm can't get a result ..")
    except Exception as ex: ievent.reply(str(ex))

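# Standalone sketch of the lhs/rhs extraction in handle_gcalc() above, run on a
# made-up sample of the legacy Google calc payload (field order assumed):
sample = '{lhs: "1 kilometer",rhs: "0.621371192 miles",error: "",icc: false}'
print("%s = %s" % (sample.split("lhs")[1].split("\"")[1], sample.split("rhs")[1].split("\"")[1]))
# -> 1 kilometer = 0.621371192 miles
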
def geturl_validate(url):
    """ validate url """
    url = urlvalidate % urllib.parse.urlencode({'uri': url})
    try: result = geturl2(url)
    except IOError as ex:
        try: errno = ex.args[0]
        except IndexError: handle_exception() ; return
        return False
    if not result: return False
    results = {}
    for key in list(re_html_valid.keys()):
        results[key] = re_html_valid[key].search(result)
        if results[key]: results[key] = results[key].group(1)
        else: results[key] = '(unknown)'
    return results

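# Sketch of the query-string step in geturl_validate() above, assuming
# urlvalidate is a format string along the lines of
# "http://validator.w3.org/check?%s" (hypothetical value, defined elsewhere):
import urllib.parse
print("http://validator.w3.org/check?%s" % urllib.parse.urlencode({'uri': 'http://example.org'}))
# -> http://validator.w3.org/check?uri=http%3A%2F%2Fexample.org
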
def list(self):
    tsv = geturl2(self.list_url()).splitlines()
    #print tsv
    bugs = []
    keys = [x.strip() for x in tsv[0].split()]
    for item in tsv[1:]:
        data = {}
        part = item.split('\t')
        # dirty hack to allow unescaped multilines in trac
        #if part[0].isdigit() and part[1].isdigit() and len(part) == len(keys):
        if True:
            #data = dict(zip(keys, part))
            try: bugs.append('%s (%s)' % (part[1], part[0]))
            except IndexError: pass
    return bugs

def show(self, bugId):
    assert bugId.isdigit(), "bug id has to be a number"
    html = geturl2(self.show_url(bugId))
    if 'APPLICATION ERROR #1100' in html: raise BugTrackerNotFound('issue not found')
    data = {'notes': 0}
    stat = ''
    skip = 0
    for line in html.splitlines():
        line = line.strip().replace('\t', '')
        if skip > 0:
            skip -= 1
            continue
        elif not line: continue
        elif '<!-- Category -->' in line: skip = 1 ; stat = 'category'
        elif '<!-- Severity -->' in line: skip = 1 ; stat = 'severity'
        elif '<!-- Reproducibility -->' in line: skip = 1 ; stat = 'reproducibility'
        elif '<!-- Reporter -->' in line: skip = 3 ; stat = 'reporter'
        elif '<!-- Priority -->' in line: skip = 1 ; stat = 'priority'
        elif '<!-- Resolution -->' in line: skip = 1 ; stat = 'resolution'
        elif '<!-- Status -->' in line: skip = 3 ; stat = 'status'
        elif '<!-- Summary -->' in line: skip = 4 ; stat = 'summary'
        elif '<td class="bugnote-public">' in line: data['notes'] += 1
        # stats
        elif stat:
            if stat in ['category', 'severity', 'reproducibility', 'reporter', 'priority', 'resolution', 'status', 'summary']:
                data[stat] = striphtml(line)
            stat = ''
    return data

def geturl_title(url):
    """ fetch title of url """
    try: result = geturl2(url)
    except urllib.error.HTTPError as ex: logging.warn("HTTPError: %s" % str(ex)) ; return False
    except urllib.error.URLError as ex: logging.warn("URLError %s" % str(ex)) ; return False
    except IOError as ex:
        try: errno = ex.args[0]
        except IndexError: handle_exception() ; return
        return False
    if not result: return False
    test_title = re_html_title.search(result)
    if test_title:
        # try to find an encoding and standardize it to utf-8
        encoding = get_encoding(result)
        title = test_title.group(1).replace('\n', ' ')
        title = title.strip()
        return decode_html_entities(title)
    return False

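# Standalone sketch of the title extraction in geturl_title() above, with a
# hypothetical regex standing in for re_html_title (which is defined elsewhere):
import re
re_title_demo = re.compile(r"<title>(.*?)</title>", re.I | re.S)
m = re_title_demo.search("<html><head><title>Example\nDomain</title></head></html>")
if m: print(m.group(1).replace('\n', ' ').strip())   # -> Example Domain
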
def markovlearnurl(url):
    """ learn an url """
    lines = 0
    logging.warn("learning %s" % url)
    try: f = geturl2(url)
    except urllib.error.URLError as ex:
        logging.warn("error learning from url: %s" % url)
        return []
    for line in f.split("\n"):
        line = striphtml(line)
        if lines % 10 == 0: time.sleep(0.01)
        line = line.strip()
        if not line: continue
        markovtalk_learn(line)
        lines += 1
    logging.warn("learning %s done" % url)
    return lines

def handle_urban(bot, ievent):
    """ urban <what> .. search urban for <what> """
    if len(ievent.args) > 0: what = " ".join(ievent.args)
    else: ievent.missing("<search query>") ; return
    try:
        data = geturl2(url + urllib.parse.quote_plus(what))
        if not data: ievent.reply("word not found: %s" % what) ; return
        data = json.loads(data)
        if data["result_type"] == "no_result": ievent.reply("word not found: %s" % what) ; return
        res = []
        for r in data["list"]: res.append(r["definition"])
        ievent.reply("result: ", res)
    except Exception as ex: ievent.reply(str(ex))

def handle_imdb(bot, event):
    """ arguments: <query> - query the imdb database at http://www.deanclatworthy.com/imdb/ """
    if not event.rest: event.missing("<query>") ; return
    query = event.rest.strip()
    urlquery = query.replace(" ", "+")
    result = {}
    res = geturl2(URL % urlquery)
    if not res: event.reply("%s didn't return a result" % (URL % urlquery)) ; return
    try: rawresult = getjson().loads(res)
    except ValueError: event.reply("sorry, cannot parse data returned from the server: %s" % res) ; return
    # the API is limited to 30 queries per hour, so avoid querying it just for testing purposes
    # rawresult = {u'ukscreens': 0, u'rating': u'7.7', u'genres': u'Animation, Drama,Family,Fantasy,Music', u'title': u'Pinocchio', u'series': 0, u'country': u'USA', u'votes': u'23209', u'languages': u'English', u'stv': 0, u'year': None, u'usascreens': 0, u'imdburl': u'http://www.imdb.com/title/tt0032910/'}
    if not rawresult: event.reply("couldn't look up %s" % query) ; return
    if 'error' in rawresult: event.reply("%s" % rawresult['error']) ; return
    for key in list(rawresult.keys()):
        if not rawresult[key]: result[key] = "n/a"
        else: result[key] = rawresult[key]
    for key in list(result.keys()):
        try: result[key] = striphtml(decode_html_entities(str(rawresult[key])))
        except AttributeError: pass
    if "year" in list(rawresult.keys()): event.reply("%(title)s (%(country)s, %(year)s): %(imdburl)s | rating: %(rating)s (out of %(votes)s votes) | Genres %(genres)s | Language: %(languages)s" % result)
    else: event.reply("%(title)s (%(country)s): %(imdburl)s | rating: %(rating)s (out of %(votes)s votes) | Genres %(genres)s | Language: %(languages)s" % result)

def querygeoipserver(ip):
    """ query the geoip service for an ip address and return the parsed json (or None). """
    try: ipinfo = getjson().loads(geturl2(URL % ip))
    except ValueError: ipinfo = None
    return ipinfo

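# Hedged usage sketch for querygeoipserver() above: URL is assumed to be a
# format string taking the ip, e.g. "http://example.invalid/geoip/%s"
# (hypothetical endpoint); the field names in the returned dict depend on the
# service, so the lookup below is illustrative only.
# info = querygeoipserver("8.8.8.8")
# if info: print(info.get("country_name"))
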
def getresults(url):
    """ fetch the url and return the raw result. """
    logging.warn(url)
    result = geturl2(url)
    return result

def handle_weather(bot, ievent):
    """ show weather using Google's weather API """
    userhost = ""
    loc = ""
    try:
        nick = ievent.rest
        if nick:
            userhost = getwho(bot, nick)
            if not userhost: pass
            else:
                try:
                    name = bot.users.getname(userhost)
                    if not name: ievent.reply("%s is not known with the bot" % nick) ; return
                    us = UserState(name)
                    loc = us['location']
                except KeyError: ievent.reply("%s doesn't have his location set in userstate" % nick) ; return
    except KeyError: pass
    if not loc:
        if ievent.rest: loc = ievent.rest
        else: ievent.missing('<nick>|<location>') ; return
    query = urlencode({'weather': loc})
    try: weathertxt = geturl2('http://www.google.ca/ig/api?%s' % query)
    except urllib.error.HTTPError as ex: ievent.reply(str(ex)) ; return
    if 'problem_cause' in bytes(weathertxt, "utf-8"):
        logging.error(weathertxt)
        ievent.reply('an error occurred looking up data for %s' % loc)
        return
    logging.debug("weather - got reply: %s" % weathertxt)
    resultstr = ""
    if weathertxt:
        gweather = minidom.parseString(weathertxt)
        gweather = gweather.getElementsByTagName('weather')[0]
        if ievent.usercmnd == "weather":
            info = gweather.getElementsByTagName('forecast_information')[0]
            if info:
                city = info.getElementsByTagName('city')[0].attributes["data"].value
                zip = info.getElementsByTagName('postal_code')[0].attributes["data"].value
                time = info.getElementsByTagName('current_date_time')[0].attributes["data"].value
                weather = gweather.getElementsByTagName('current_conditions')[0]
                condition = weather.getElementsByTagName('condition')[0].attributes["data"].value
                temp_f = weather.getElementsByTagName('temp_f')[0].attributes["data"].value
                temp_c = weather.getElementsByTagName('temp_c')[0].attributes["data"].value
                humidity = weather.getElementsByTagName('humidity')[0].attributes["data"].value
                try: wind = weather.getElementsByTagName('wind_condition')[0].attributes["data"].value
                except IndexError: wind = ""
                try: wind_km = round(int(wind[-6:-4]) * 1.609344)
                except ValueError: wind_km = ""
                if (not condition == ""): condition = " Oh, and it's " + condition + "."
                resultstr = "As of %s, %s (%s) has a temperature of %sC/%sF with %s. %s (%s km/h).%s" % (time, city, zip, temp_c, temp_f, humidity, wind, wind_km, condition)
        elif ievent.usercmnd == "forecast":
            forecasts = gweather.getElementsByTagName('forecast_conditions')
            for forecast in forecasts:
                condition = forecast.getElementsByTagName('condition')[0].attributes["data"].value
                low_f = forecast.getElementsByTagName('low')[0].attributes["data"].value
                high_f = forecast.getElementsByTagName('high')[0].attributes["data"].value
                day = forecast.getElementsByTagName('day_of_week')[0].attributes["data"].value
                low_c = round((int(low_f) - 32) * 5.0 / 9.0)
                high_c = round((int(high_f) - 32) * 5.0 / 9.0)
                resultstr += "[%s: F(%sl/%sh) C(%sl/%sh) %s]" % (day, low_f, high_f, low_c, high_c, condition)
    if not resultstr: ievent.reply('%s not found!' % loc) ; return
    else: ievent.reply(resultstr)

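# Standalone check of the Fahrenheit-to-Celsius conversion used in the forecast
# branch of handle_weather() above:
def f_to_c(f):
    return round((int(f) - 32) * 5.0 / 9.0)
print(f_to_c(68), f_to_c(32))   # -> 20 0
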