Example #1
def handle_translate(bot, event):
    if not event.rest:
        event.missing("<from> <to> <text>")
        return
    query = parse_pair(event.rest.strip())
    if not query:
        event.missing("<from> <to> <text>")
        return
#    event.reply(URL % query)
    rawresult = {}
    try:
        rawresult = getjson().loads(geturl2(URL % query))
    except:
        event.reply("Query to Google failed")
        return


    # debug
    # rawresult = {"responseData": {"translatedText":"test"}, "responseDetails": None, "responseStatus": 201}
    # logging.warn(URL % query)
    # logging.warn(rawresult)
    if rawresult['responseStatus'] != 200:
        event.reply("Error in the query: ", rawresult)
        return
    if 'responseData' in rawresult:
        if 'translatedText' in rawresult['responseData']:
            translation = rawresult['responseData']['translatedText']
            event.reply(translation)
        else:
            event.reply("No text available")
    else:
        event.reply("Something is wrong, probably the API changed")
Example #2
def handle_rc(bot, event):
    """ arguments: <file>|<url> - execute a .jsb resource file with bot commands. """
    if not event.rest: event.missing("<file>|<url>") ; return
    if not getmainconfig().allowrc: event.reply("rc mode is not enabled") ; return
    teller = 0
    t = event.rest
    waiting = []
    try:
        try:
            if getmainconfig().allowremoterc and t.startswith("http"): data = geturl2(t)
            else: data = open(t, 'r').read()
        except IOError, ex: event.reply("I/O error: %s" % str(ex)) ; return
        if not data: event.reply("can't get data from %s" % event.rest) ; return
        for d in data.split("\n"):
            i = d.strip()
            if not i: continue
            if i.startswith("#"): continue
            e = cpy(event)
            e.txt = "%s" % i.strip()
            e.direct = True
            bot.put(e)
            waiting.append(e)
            teller += 1
        event.reply("%s commands executed" % teller)
    except Exception, ex: event.reply("an error occurred: %s" % str(ex)) ; handle_exception()
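The loop treats the resource file as plain text: one bot command per line, with empty lines and # comments skipped. A self-contained sketch of that filtering, using placeholder command names:

# stand-alone illustration of the line filtering handle_rc applies (command names are placeholders)
data = "# comments are skipped\nfirst-command\n\nsecond-command with arguments\n"
commands = []
for d in data.split("\n"):
    i = d.strip()
    if not i: continue               # skip blank lines
    if i.startswith("#"): continue   # skip comment lines
    commands.append(i)
assert commands == ["first-command", "second-command with arguments"]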
Example #3
    def show(self, bugId):
        assert bugId.isdigit(), "bug id has to be a number"
        tsv = geturl2(self.show_url(bugId)+'?format=tab').splitlines()
        keys = map(lambda x: x.strip(), tsv[0].split())
        part = tsv[1].split('\t')
        data = dict(zip(keys, part))
        return data
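The tab format is parsed positionally: the header row supplies the keys, the first data row the values, and zip pairs them up. A small stand-alone illustration with made-up field names:

# illustration of the header/row pairing used above (field names and values are made up)
tsv = ["id\tsummary\tstatus", "42\texample bug\topen"]
keys = map(lambda x: x.strip(), tsv[0].split())
part = tsv[1].split('\t')
data = dict(zip(keys, part))
print data['id'], data['status']    # -> 42 open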
Example #4
def geturl_title(url):
    """ fetch title of url """
    try:
        result = geturl2(url)
    except urllib2.HTTPError, ex:
        logging.warn("HTTPError: %s" % str(ex))
        return False
Example #5
    def show(self, bugId):
        assert bugId.isdigit(), "bug id has to be a number"
        html = geturl2(self.show_url(bugId))
        data = {}
        stat = ''
        for line in html.splitlines():
            line = line.strip()
            if not line:
                continue

            elif '<td headers="category">' in line:
                stat = 'category'
            elif '<td headers="status">' in line:
                stat = 'status'
            elif '<td headers="assignedto">' in line:
                stat = 'assigned to'
            elif '<td headers="os">' in line:
                data['os'] = striphtml(line).strip()
            elif '<td headers="severity">' in line:
                data['severity'] = striphtml(line).strip()
            elif '<td headers="priority">' in line:
                data['priority'] = striphtml(line).strip()
            elif '<td headers="reportedver">' in line:
                data['version'] = striphtml(line).strip()
            elif '<h2 class="summary' in line:
                stat = 'summary'
            elif '<a href="#comments">Comments (' in line:
                data['comments'] = line.split('(', 1)[1].split(')')[0]
            # stats
            elif stat:
                if stat in ['category', 'status', 'assigned to', 'summary']:
                    data[stat] = striphtml(line).strip()
                stat = ''
        return data
Example #6
def get_temp(bot, ievent):
    """No arguments - just fetch the temp. """
    try:
        thermopage = geturl2('http://www.met.wau.nl/veenkampen/data/C_current.txt',timeout=10).split()
        currentline = thermopage[-1]
        data = currentline.split(',')
        temp = str(round(float(data[2]),1))
        humid = str(round(float(data[8]),1))
        precip = data[19]
        pressure = str(float(data[21]))
        windspeed = str(round(float(data[22]),1))
        windangle = float(data[26])+11.25
        winddir = ''
        if windangle>0 and windangle<= 22.5: winddir = 'N'
        if windangle>22.5 and windangle<= 45: winddir = 'NNE'
        if windangle>45 and windangle<= 67.5: winddir = 'NE'
        if windangle>67.5 and windangle<= 90: winddir = 'ENE'
        if windangle>90 and windangle<= 112.5: winddir = 'E'
        if windangle>112.5 and windangle<= 135: winddir = 'ESE'
        if windangle>135 and windangle<= 157.5: winddir = 'SE'
        if windangle>157.5 and windangle<= 180: winddir = 'SSE'
        if windangle>180 and windangle<= 202.5: winddir = 'S'
        if windangle>202.5 and windangle<= 225: winddir = 'SSW'
        if windangle>225 and windangle<= 247.5: winddir = 'SW'
        if windangle>247.5 and windangle<= 270: winddir = 'WSW'
        if windangle>270 and windangle<= 292.5: winddir = 'W'
        if windangle>292.5 and windangle<= 315: winddir = 'WNW'
        if windangle>315 and windangle<= 337.5: winddir = 'NW'
        if windangle>337.5 and windangle<= 360: winddir = 'NNW'
        if windangle>360 and windangle<= 382.5: winddir = 'N'
        ievent.reply(temp+' C, '+humid+'% humidity, '+windspeed+' m/s '+winddir+', '+precip+' mm precipitation, '+pressure+' kPa.')
    except urllib2.URLError:
        ievent.reply('Cannot read from server.')        
    except:
        ievent.reply('What is this madness? '+ str(sys.exc_info()[0]))
Example #7
 def api(self, mount, size=30, options={}):
     url = 'http://api.stackoverflow.com/1.0%s/%s?body=true&pagesize=%s' % (
         mount, urllib.urlencode(options), size)
     if self.api_key is not None:
         url += '&key=%s' % self.api_key
     content = StringIO.StringIO(geturl2(url, timeout=15))
     return gzip.GzipFile(fileobj=content).read()
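Wrapping the geturl2 result in StringIO and handing it to GzipFile suggests the Stack Overflow API replies with gzip-compressed bodies. The same round trip on local data, for illustration:

import StringIO, gzip

# compress a sample payload in memory...
buf = StringIO.StringIO()
gz = gzip.GzipFile(fileobj=buf, mode='wb')
gz.write("payload as it would come back from the API")
gz.close()

# ...and decompress it exactly the way api() handles the geturl2 result
content = StringIO.StringIO(buf.getvalue())
print gzip.GzipFile(fileobj=content).read()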
Example #8
    def show(self, bugId):
        assert bugId.isdigit(), "bug id has to be a number"
        html = geturl2(self.show_url(bugId))
        data = {}
        stat = ''
        for line in html.splitlines():
            line = line.strip()
            if not line:
                continue

            elif '<td headers="category">' in line:
                stat = 'category'
            elif '<td headers="status">' in line:
                stat = 'status'
            elif '<td headers="assignedto">' in line:
                stat = 'assigned to'
            elif '<td headers="os">' in line:
                data['os'] = striphtml(line).strip()
            elif '<td headers="severity">' in line:
                data['severity'] = striphtml(line).strip()
            elif '<td headers="priority">' in line:
                data['priority'] = striphtml(line).strip()
            elif '<td headers="reportedver">' in line:
                data['version'] = striphtml(line).strip()
            elif '<h2 class="summary' in line:
                stat = 'summary'
            elif '<a href="#comments">Comments (' in line:
                data['comments'] = line.split('(', 1)[1].split(')')[0]
            # stats
            elif stat:
                if stat in ['category', 'status', 'assigned to', 'summary']:
                    data[stat] = striphtml(line).strip()
                stat = ''
        return data
Example #9
def handle_rc(bot, event):
    """ import aliases by url. assumes a .RC file. 1 alias per line """
    if not event.rest:
        event.missing("<file>|<url>")
        return
    teller = 0
    t = event.rest
    waiting = []
    try:
        try:
            if t.startswith("http"): data = geturl2(t)
            else: data = open(t, 'r').read()
        except IOError, ex:
            event.reply("I/O error: %s" % str(ex))
            return
        if not data:
            event.reply("can't get data from %s" % event.rest)
            return
        for d in data.split("\n"):
            i = d.strip()
            if not i: continue
            if i.startswith("#"): continue
            e = cpy(event)
            e.txt = "%s" % i.strip()
            e.direct = True
            bot.put(e)
            waiting.append(e)
            #result = bot.docmnd(event.userhost, event.channel, i, wait=1, event=event)
            #if result: result.waitall()
            teller += 1
        #waitevents(waiting)
        event.reply("%s commands executed" % teller)
Example #10
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     tsv = geturl2(self.show_url(bugId) + '?format=tab').splitlines()
     keys = map(lambda x: x.strip(), tsv[0].split())
     part = tsv[1].split('\t')
     data = dict(zip(keys, part))
     return data
Example #11
def handle_rc(bot, event):
    """ arguments: <file>|<url> - execute a .jsb resource file with bot commands. """
    if not event.rest:
        event.missing("<file>|<url>")
        return
    if not getmainconfig().allowrc:
        event.reply("rc mode is not enabled")
        return
    teller = 0
    t = event.rest
    waiting = []
    try:
        try:
            if getmainconfig().allowremoterc and t.startswith("http"):
                data = geturl2(t)
            else:
                data = open(t, 'r').read()
        except IOError, ex:
            event.reply("I/O error: %s" % str(ex))
            return
        if not data:
            event.reply("can't get data from %s" % event.rest)
            return
        for d in data.split("\n"):
            i = d.strip()
            if not i: continue
            if i.startswith("#"): continue
            e = cpy(event)
            e.txt = "%s" % i.strip()
            e.direct = True
            bot.put(e)
            waiting.append(e)
            teller += 1
        event.reply("%s commands executed" % teller)
Example #12
def handle_imdb(bot, event):
    """ arguments: <query> - query the imdb databae at http://www.deanclatworthy.com/imdb/ """
    if not event.rest:
        event.missing("<query>")
        return
    query = event.rest.strip()
    urlquery = query.replace(" ", "+")
    result = {}
    rawresult = getjson().loads(geturl2(URL % urlquery))
    # the API is limited to 30 queries per hour, so avoid querying it just for testing purposes
    # rawresult = {u'ukscreens': 0, u'rating': u'7.7', u'genres': u'Animation,&nbsp;Drama,Family,Fantasy,Music', u'title': u'Pinocchio', u'series': 0, u'country': u'USA', u'votes': u'23209', u'languages': u'English', u'stv': 0, u'year': None, u'usascreens': 0, u'imdburl': u'http://www.imdb.com/title/tt0032910/'}
    if not rawresult:
        event.reply("couldn't look up %s" % query)
        return
    if 'error' in rawresult:
        event.reply("%s" % rawresult['error'])
        return
    print rawresult
    for key in rawresult.keys():
        if not rawresult[key]: result[key] = u"n/a"
        else: result[key] = rawresult[key]
    for key in result.keys():
        try:
            result[key] = striphtml(decode_html_entities(rawresult[key]))
        except AttributeError:
            pass
    if "year" in rawresult.keys():
        event.reply(
            "%(title)s (%(country)s, %(year)s): %(imdburl)s | rating: %(rating)s (out of %(votes)s votes) | Genres %(genres)s | Language: %(languages)s"
            % result)
    else:
        event.reply(
            "%(title)s (%(country)s): %(imdburl)s | rating: %(rating)s (out of %(votes)s votes) | Genres %(genres)s | Language: %(languages)s"
            % result)
Example #13
def handle_rc(bot, event):
    """ import aliases by url. assumes a .RC file. 1 alias per line """
    if not event.rest: event.missing("<file>|<url>") ; return
    teller = 0
    t = event.rest
    waiting = []
    try:
        try:
            if t.startswith("http"): data = geturl2(t)
            else: data = open(t, 'r').read()
        except IOError, ex: event.reply("I/O error: %s" % str(ex)) ; return
        if not data: event.reply("can't get data from %s" % event.rest) ; return
        for d in data.split("\n"):
            i = d.strip()
            if not i: continue
            if i.startswith("#"): continue
            e = cpy(event)
            e.txt = "%s" % i.strip()
            e.direct = True
            bot.put(e)
            waiting.append(e)
            #result = bot.docmnd(event.userhost, event.channel, i, wait=1, event=event)
            #if result: result.waitall()
            teller += 1
        #waitevents(waiting)
        event.reply("%s commands executed" % teller)
    except Exception, ex: event.reply("an error occurred: %s" % str(ex)) ; handle_exception()
Example #14
def handle_translate(bot, event):
    if not event.rest: 
        event.missing("<from> <to> <text>")
        return
    query = parse_pair(event.rest.strip())
    if not query: 
        event.missing("<from> <to> <text>")
        return
#    event.reply(URL % query)
    rawresult = {}
    try:
        rawresult = getjson().loads(geturl2(URL % query))
    except:
        event.reply("Query to Google failed")
        return
    # debug
    # rawresult = {"responseData": {"translatedText":"test"}, "responseDetails": None, "responseStatus": 201}
    # logging.warn(URL % query)
    # logging.warn(rawresult)
    if rawresult['responseStatus'] != 200:
        event.reply("Error in the query: ", rawresult)
        return
    if 'responseData' in rawresult:
        if 'translatedText' in rawresult['responseData']:
            translation = rawresult['responseData']['translatedText']
            event.reply(translation)
        else:
            event.reply("No text available")
    else:
        event.reply("Something is wrong, probably the API changed")
Example #15
 def list(self):
     csv = geturl2(self.list_url_summary())
     num = 0
     for line in csv.splitlines():
         try:
             num += int(line.split(',')[1])
         except ValueError:
             pass
     bugs = []
     if num > 100:
         bugs.append('showing 100-%d' % num)
     csv = geturl2(self.list_url())
     for line in csv.splitlines()[1:]:
         part = line.split(',')
         bugs.append('%s (%s)' % (part[0], part[1].replace('"', '')))
         if len(bugs) > 100:
             break
     return bugs        
Example #16
def geturl_title(url):
    """ fetch title of url """
    try: result = geturl2(url)
    except urllib2.HTTPError, ex: logging.warn("HTTPError: %s" % str(ex)) ; return False
    except urllib2.URLError, ex: logging.warn("URLError %s" % str(ex)) ; return False
    except IOError, ex:
        try: errno = ex[0]
        except IndexError: handle_exception() ; return
        return False
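Both geturl_title excerpts stop after the error handling, so the success path that actually pulls the <title> element out of result is not shown. A rough sketch of such an extraction, not necessarily what the original plugin does:

import re

def extract_title(html):
    # hypothetical helper: return the contents of the first <title> tag, or False
    match = re.search(r"<title[^>]*>(.*?)</title>", html, re.IGNORECASE | re.DOTALL)
    if not match: return False
    return match.group(1).strip()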
Example #17
 def list(self):
     csv = geturl2(self.list_url_summary())
     num = 0
     for line in csv.splitlines():
         try:
             num += int(line.split(',')[1])
         except ValueError:
             pass
     bugs = []
     if num > 100:
         bugs.append('showing 100-%d' % num)
     csv = geturl2(self.list_url())
     for line in csv.splitlines()[1:]:
         part = line.split(',')
         bugs.append('%s (%s)' % (part[0], part[1].replace('"', '')))
         if len(bugs) > 100:
             break
     return bugs
Example #18
def getplus(target):
    credentials = _import_byfile("credentials", getdatadir() + os.sep + "config" + os.sep + "credentials.py")
    url = "https://www.googleapis.com/plus/v1/people/%s/activities/public?alt=json&pp=1&key=%s" % (target, credentials.googleclient_apikey)
    result = geturl2(url)
    data = json.loads(result)
    res = []
    for item in data['items']:
        i = LazyDict(item)
        res.append("%s - %s - %s" % (i.actor['displayName'], i['title'], item['url']))
    return res
Example #19
 def cloneurl(self, url, auth):
     """ add feeds from remote url. """
     data = geturl2(url)
     got = []
     for line in data.split('\n'):
         try: (name, url) = line.split()
         except ValueError:
             logging.debug("hubbub - cloneurl - can't split %s line" % line)
             continue
         if url.endswith('<br>'): url = url[:-4]
         self.add(name, url, auth)
         got.append(name)
     return got
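cloneurl expects the remote document to list one feed per line as "name url", possibly with a trailing <br> left over from HTML rendering; anything that does not split into exactly two fields is skipped. A quick check of that parsing on sample data (names and URLs are placeholders):

data = "news http://example.org/feed.xml<br>\nline-without-a-url\nblog http://example.com/atom.xml"
for line in data.split('\n'):
    try: (name, url) = line.split()
    except ValueError: continue            # skipped, just like cloneurl logs and continues
    if url.endswith('<br>'): url = url[:-4]
    print name, url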
Example #20
def handle_urban(bot, ievent):
    """ urban <what> .. search urban for <what> """
    if len(ievent.args) > 0: what = " ".join(ievent.args)
    else: ievent.missing('<search query>') ; return
    try:
        data = geturl2(url + urllib.quote_plus(what))
        if not data: ievent.reply("word not found: %s" % what) ; return
        data = json.loads(data)
        if data['result_type'] == 'no_result': ievent.reply("word not found: %s" % what) ; return
        res = []
        for r in data['list']: res.append(r['definition'])
        ievent.reply("result: ", res)
    except Exception, ex: ievent.reply(str(ex))
Example #21
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     html = geturl2(self.show_url(bugId))
     if 'APPLICATION ERROR #1100' in html:
         raise BugTrackerNotFound('issue not found')
     data = {'notes': 0}
     stat = ''
     skip = 0
     for line in html.splitlines():
         line = line.strip().replace('\t', '')
         if skip > 0:
             skip -= 1
             continue
         elif not line:
             continue
         elif '<!-- Category -->' in line:
             skip = 1
             stat = 'category'
         elif '<!-- Severity -->' in line:
             skip = 1
             stat = 'severity'
         elif '<!-- Reproducibility -->' in line:
             skip = 1
             stat = 'reproducibility'
         elif '<!-- Reporter -->' in line:
             skip = 3
             stat = 'reporter'
         elif '<!-- Priority -->' in line:
             skip = 1
             stat = 'priority'
         elif '<!-- Resolution -->' in line:
             skip = 1
             stat = 'resolution'
         elif '<!-- Status -->' in line:
             skip = 3
             stat = 'status'
         elif '<!-- Summary -->' in line:
             skip = 4
             stat = 'summary'
         elif '<td class="bugnote-public">' in line:
             data['notes'] += 1
         # stats
         elif stat:
             if stat in [
                     'category', 'severity', 'reproducibility', 'reporter',
                     'priority', 'resolution', 'status', 'summary'
             ]:
                 data[stat] = striphtml(line)
             stat = ''
     return data
Example #22
def getplus(target):
    credentials = _import_byfile(
        "credentials",
        getdatadir() + os.sep + "config" + os.sep + "credentials.py")
    url = "https://www.googleapis.com/plus/v1/people/%s/activities/public?alt=json&pp=1&key=%s" % (
        target, credentials.googleclient_apikey)
    result = geturl2(url)
    data = json.loads(result)
    res = []
    for item in data['items']:
        i = LazyDict(item)
        res.append("%s - %s - %s" %
                   (i.actor['displayName'], i['title'], item['url']))
    return res
Example #23
    def list(self):
        tsv = geturl2(self.list_url()).splitlines()
        #print tsv
        bugs = []
        keys = map(lambda x: x.strip(), tsv[0].split())
        for item in tsv[1:]:
            data = {}
            part = item.split('\t')
            # dirty hack to allow unescaped multilines in trac
            #if part[0].isdigit() and part[1].isdigit() and len(part) == len(keys):
            if True:
                #data = dict(zip(keys, part))
                try: bugs.append('%s (%s)' % (part[1], part[0]))
                except IndexError: pass
        return bugs
Example #24
 def show(self, bugId):
     assert bugId.isdigit(), "bug id has to be a number"
     html = geturl2(self.show_url(bugId))
     if 'APPLICATION ERROR #1100' in html:
         raise BugTrackerNotFound('issue not found')
     data = {'notes': 0}
     stat = ''
     skip = 0
     for line in html.splitlines():
         line = line.strip().replace('\t', '')
         if skip > 0:
             skip -= 1
             continue
         elif not line:
             continue
         elif '<!-- Category -->' in line:
             skip = 1
             stat = 'category'
         elif '<!-- Severity -->' in line:
             skip = 1
             stat = 'severity'
         elif '<!-- Reproducibility -->' in line:
             skip = 1
             stat = 'reproducibility'
         elif '<!-- Reporter -->' in line:
             skip = 3
             stat = 'reporter'
         elif '<!-- Priority -->' in line:
             skip = 1
             stat = 'priority'
         elif '<!-- Resolution -->' in line:
             skip = 1
             stat = 'resolution'
         elif '<!-- Status -->' in line:
             skip = 3
             stat = 'status'
         elif '<!-- Summary -->' in line:
             skip = 4
             stat = 'summary'
         elif '<td class="bugnote-public">' in line:
             data['notes'] += 1
         # stats
         elif stat:
             if stat in ['category', 'severity', 'reproducibility', 'reporter',
                 'priority', 'resolution', 'status', 'summary']:
                 data[stat] = striphtml(line)
             stat = ''
     return data
Example #25
 def list(self):
     tsv = geturl2(self.list_url()).splitlines()
     #print tsv
     bugs = []
     keys = map(lambda x: x.strip(), tsv[0].split())
     for item in tsv[1:]:
         data = {}
         part = item.split('\t')
         # dirty hack to allow unescaped multilines in trac
         #if part[0].isdigit() and part[1].isdigit() and len(part) == len(keys):
         if True:
             #data = dict(zip(keys, part))
             try:
                 bugs.append('%s (%s)' % (part[1], part[0]))
             except IndexError:
                 pass
     return bugs
Example #26
File: imdb.py Project: code2u/jsb
def handle_imdb(bot, event):
    """ arguments: <query> - query the imdb databae at http://www.deanclatworthy.com/imdb/ """
    if not event.rest:  event.missing("<query>") ; return
    query = event.rest.strip()
    urlquery = query.replace(" ", "+")
    result = {}
    rawresult = getjson().loads(geturl2(URL % urlquery))
    # the API is limited to 30 queries per hour, so avoid querying it just for testing purposes
    # rawresult = {u'ukscreens': 0, u'rating': u'7.7', u'genres': u'Animation,&nbsp;Drama,Family,Fantasy,Music', u'title': u'Pinocchio', u'series': 0, u'country': u'USA', u'votes': u'23209', u'languages': u'English', u'stv': 0, u'year': None, u'usascreens': 0, u'imdburl': u'http://www.imdb.com/title/tt0032910/'}
    if not rawresult: event.reply("couldn't look up %s" % query) ; return
    if 'error' in rawresult: event.reply("%s" % rawresult['error']) ; return
    for key in rawresult.keys():
        if not rawresult[key]: result[key] = u"n/a"
        else: result[key] = rawresult[key]
    for key in result.keys():
        try: result[key] = striphtml(decode_html_entities(rawresult[key]))
        except AttributeError: pass
    event.reply("%(title)s (%(country)s, %(year)s): %(imdburl)s | rating: %(rating)s (out of %(votes)s votes) | Genres %(genres)s | Language: %(languages)s" % result )
Example #27
def handle_urban(bot, ievent):
    """ urban <what> .. search urban for <what> """
    if len(ievent.args) > 0: what = " ".join(ievent.args)
    else:
        ievent.missing('<search query>')
        return
    try:
        data = geturl2(url + urllib.quote_plus(what))
        if not data:
            ievent.reply("word not found: %s" % what)
            return
        data = json.loads(data)
        if data['result_type'] == 'no_result':
            ievent.reply("word not found: %s" % what)
            return
        res = []
        for r in data['list']:
            res.append(r['definition'])
        ievent.reply("result: ", res)
    except Exception, ex:
        ievent.reply(str(ex))
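The key accesses above imply the shape of the decoded Urban Dictionary response: a result_type field plus a list of entries that each carry a definition. A minimal mock of that structure (values are made up) that the loop would accept:

# mock of the JSON shape the handler expects; values are invented for illustration only
data = {'result_type': 'exact',
        'list': [{'definition': 'first definition text'},
                 {'definition': 'second definition text'}]}
res = []
for r in data['list']:
    res.append(r['definition'])
print len(res)    # -> 2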
Example #28
def get_temp(bot, ievent):
    """No arguments - just fetch the temp. """
    try:
        thermopage = geturl2(
            'http://www.met.wau.nl/veenkampen/data/C_current.txt',
            timeout=10).split()
        currentline = thermopage[-1]
        data = currentline.split(',')
        temp = str(round(float(data[2]), 1))
        humid = str(round(float(data[8]), 1))
        precip = data[19]
        pressure = str(float(data[21]))
        windspeed = str(round(float(data[22]), 1))
        windangle = float(data[26]) + 11.25
        winddir = ''
        if windangle > 0 and windangle <= 22.5: winddir = 'N'
        if windangle > 22.5 and windangle <= 45: winddir = 'NNE'
        if windangle > 45 and windangle <= 67.5: winddir = 'NE'
        if windangle > 67.5 and windangle <= 90: winddir = 'ENE'
        if windangle > 90 and windangle <= 112.5: winddir = 'E'
        if windangle > 112.5 and windangle <= 135: winddir = 'ESE'
        if windangle > 135 and windangle <= 157.5: winddir = 'SE'
        if windangle > 157.5 and windangle <= 180: winddir = 'SSE'
        if windangle > 180 and windangle <= 202.5: winddir = 'S'
        if windangle > 202.5 and windangle <= 225: winddir = 'SSW'
        if windangle > 225 and windangle <= 247.5: winddir = 'SW'
        if windangle > 247.5 and windangle <= 270: winddir = 'WSW'
        if windangle > 270 and windangle <= 292.5: winddir = 'W'
        if windangle > 292.5 and windangle <= 315: winddir = 'WNW'
        if windangle > 315 and windangle <= 337.5: winddir = 'NW'
        if windangle > 337.5 and windangle <= 360: winddir = 'NNW'
        if windangle > 360 and windangle <= 382.5: winddir = 'N'
        ievent.reply(temp + ' C, ' + humid + '% humidity, ' + windspeed +
                     ' m/s ' + winddir + ', ' + precip +
                     ' mm precipitation, ' + pressure + ' kPa.')
    except urllib2.URLError:
        ievent.reply('Cannot read from server.')
    except:
        ievent.reply('What is this madness? ' + str(sys.exc_info()[0]))
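The chain of comparisons maps the offset wind angle onto 16 compass points in 22.5 degree sectors. An equivalent, more compact lookup (a sketch; values exactly on a sector boundary may land in the neighbouring point):

# compact alternative to the if-chain: 16 compass points, 22.5 degrees each
points = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
          'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']

def compass(angle):
    # angle is the raw wind direction in degrees, before the +11.25 offset applied above
    return points[int(((angle + 11.25) % 360) // 22.5)]

print compass(0), compass(90), compass(350)    # -> N E N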
Example #29
def do_imdb_title_search(query):
    """fetch the page with the search, appending the parameters string"""
    url = baseurl + '/search/title?' + query
    logging.warn(url)
    return geturl2(url)
Example #30
def querygeoipserver(ip):
    ipinfo = getjson().loads(geturl2(URL % ip))
    return ipinfo
Example #31
def querygeoipserver(ip):
    ipinfo = getjson().loads(geturl2(URL % ip))
    return ipinfo
Example #32
def do_imdb_title_search(query):
    """fetch the page with the search, appending the parameters string"""
    url = baseurl + '/search/title?' + query
    logging.warn(url)
    return geturl2(url)
Example #33
def do_imdb_api_query(query):
    url = "http://www.deanclatworthy.com/imdb/?" + query
    logging.warn(url)
    result = getjson().loads(geturl2(url))
    return result
Example #34
def getresults(url):
    logging.warn(url)
    result = geturl2(url)
    return result
Example #35
def getresults(url):
    logging.warn(url)
    result = geturl2(url)
    return result
Example #36
 def api(self, mount, size=30, options={}):
     url = 'http://api.stackoverflow.com/1.0%s/%s?body=true&pagesize=%s' % (mount, urllib.urlencode(options), size)
     if self.api_key is not None:
         url += '&key=%s' % self.api_key
     content = StringIO.StringIO(geturl2(url, timeout=15))
     return gzip.GzipFile(fileobj=content).read()
Example #37
def do_imdb_api_query(query):
    url = "http://www.deanclatworthy.com/imdb/?" + query
    logging.warn(url)
    result = getjson().loads(geturl2(url))
    return result