Code example #1
File: wiki.py Project: mutantmonkey/phenny
    def search(self, term):
        try:
            exactterm = format_term(term)
            exactterm = quote(exactterm)
            exacturl = self.endpoints['url'].format(exactterm)
            html = web.get(exacturl)
            return (html, exacturl)
        except HTTPError:
            pass

        term = deformat_term(term)
        term = quote(term)
        apiurl = self.endpoints['api'].format(term)

        try:
            result = json.loads(web.get(apiurl))
        except ValueError:
            return None

        result = result['query']['search']

        if not result:
            return None

        term = result[0]['title']
        term = format_term(term)
        term = quote(term)

        url = self.endpoints['url'].format(term)
        html = web.get(url)
        return (html, url)
Code example #2
File: iso639.py Project: KaiCode2/phenny
def scrape_wiki_codes():
    data = {}
    base_url = 'http://en.wikipedia.org/wiki/List_of_ISO_639'
    #639-1
    resp = web.get(base_url + '-1_codes')
    h = html.document_fromstring(resp)
    table = h.find_class('wikitable')[0]
    for row in table.findall('tr')[1:]:
        name = row.findall('td')[2].find('a').text
        code = row.findall('td')[4].text
        data[code] = name
    #639-2
    resp = web.get(base_url + '-2_codes')
    h = html.document_fromstring(resp)
    table = h.find_class('wikitable')[0]
    for row in table.findall('tr')[1:]:
        name = row.findall('td')[3].find('a')
        if name is not None:  # lxml elements are falsy when childless, so compare against None
            name = name.text
        else:
            continue
        code_list = row.findall('td')[0].text.split(' ')
        if len(code_list) == 1:
            code = code_list[0]
        else:
            for i in code_list:
                if '*' in i:
                    code = i.replace('*', '')
                    break
        data[code] = name

    return data
Code example #3
File: geo.py Project: relet/pygeohashing
def getdjia(date):
    if date in djiacache:
        return djiacache[date]

    conn = pool.connection()
    cur = conn.cursor()

    cur.execute("select djia from djiacache where dato = '%s'" % (date,))
    djia = cur.fetchone()
    if djia:
        return djia[0]

    for crox in croxes:
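        # try each DJIA source URL template in turn until one responds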
        try:
            djia = web.get(crox % date)
            if not (("error" in djia) or ("available" in djia)):
                djiacache[date] = djia
                cur.execute("insert into djiacache values ('%s','%s')" % (date, djia))
                conn.commit()
            return djia
        except:
            pass
    try:
        y, m, d = str(date).split("-")
        djia = web.get("http://irc.peeron.com/xkcd/map/data/%s/%s/%s" % (y, m, d))
        return djia
    except Exception:
        return None
Code example #4
File: bikebot.py Project: evanjfraser/bikebot
def forecast(phenny, input):
    if input.group(2):
        fetch = readdbattr(input.group(2).lower(), 'location')
    else:
        fetch = readdbattr(input.nick.lower(), 'location')
    if fetch:
        arg = fetch.split(".")[0]
        if arg.isdigit():
            arg = fetch.split(";")[0]  # fetched something, so use the stored location for that nick
        else:
            arg = fetch.split(".")[0]  # fetched something, so use the stored location for that nick
    else:
        arg = input.group(2)  # look up the argument given (place)
    if not arg:
        return phenny.say("Location please?")
    fccoordsjs = json.loads(web.get("http://maps.googleapis.com/maps/api/geocode/json?sensor=false&address=" + arg))
    pprint.pprint(fccoordsjs)
    coords = fccoordsjs['results'][0]['geometry']['location']
    fciojs = web.get("https://api.forecast.io/forecast/" + phenny.config.forecastapikey + "/" + str(coords['lat']) + ',' + str(coords['lng']))
    fcjs = json.loads(fciojs)
    pprint.pprint(fcjs)
    try:
        out = 'Currently : ' + fcjs['currently']['summary']
    except KeyError:
        try:
            out = 'Next few minutes : ' + fcjs['minutely']['summary']
        except KeyError:
            out = 'Next Hour : ' + fcjs['hourly']['summary']
    return phenny.say(out)
Code example #5
File: randomreddit.py Project: LordTroan/phenny
def randomreddit(phenny, input):

    subreddit = input.group(2)
    if not subreddit:
        phenny.say(".random <subreddit> - get a random link from the subreddit's frontpage")
        return
        
    if not re.match('^[A-Za-z0-9_-]*$',subreddit):
        phenny.say(input.nick + ": bad subreddit format.")
        return


    url = "http://www.reddit.com/r/" + subreddit + "/.json"
    # retry up to three times before giving up
    for _ in range(3):
        try:
            resp = web.get(url)
            break
        except:
            continue
    else:
        raise GrumbleError('Reddit or subreddit unreachable.')
    
    reddit = json.loads(resp)
    post = choice(reddit['data']['children'])

    nsfw = post['data']['over_18']
    
    if nsfw:
        phenny.reply("!!NSFW!! " + post['data']['url'] + " (" + post['data']['title'] + ") !!NSFW!!")
    else:
        phenny.reply(post['data']['url'] + " (" +  post['data']['title'] + ")")
Code example #6
File: beacon.py Project: hive76/queenBee
	def checkBeacon(phenny):
		global beaconStatus
		beaconStatus = True
		while not singleton.beaconPlzStop:
			"""check beacon """
			f = open(beaconFile)
			activated = int(f.read())
			f.close()
			if (int(activated) + beaconDelay) > int(time.time()):
				web.get(beaconURL + 'ACTIVE')
				a.digital_write(9, firmata.HIGH)
				LEDShift(a, c.currentColor, "00FF00", 2)
				time.sleep(2)
				LEDShift(a, "00FF00", c.currentColor, 2)
				if (beaconStatus == False):
					print "Beacon has been activated"
					phenny.msg('#hive76', "Beacon has been activated")
				beaconStatus = True
			else:
				web.get(beaconURL + 'INACTIVE')
				a.digital_write(9, firmata.LOW)
				LEDShift(a, c.currentColor, "FF0000", 2)
				time.sleep(2)
				LEDShift(a, "FF0000", c.currentColor, 2)

				if (beaconStatus == True):
					phenny.msg('#hive76', "Beacon has deactivated")
					print "Beacon has been deactivated"
				beaconStatus = False
			time.sleep(30)
Code example #7
File: wiki.py Project: goavki/phenny
    def search(self, term):
        try:
            exactterm = format_term(term)
            exactterm = quote(exactterm)
            exacturl = self.endpoints['url'].format(exactterm)
            html = web.get(exacturl)
            return (html, exacturl)
        except HTTPError:
            pass

        term = deformat_term(term)
        term = quote(term)
        apiurl = self.endpoints['api'].format(term)

        try:
            result = json.loads(web.get(apiurl))
        except ValueError as e:
            raise ContentDecodingError(str(e))

        if 'error' in result:
            raise ServerFault(result['error'])

        result = result['query']['search']

        if not result:
            return None

        term = result[0]['title']
        term = format_term(term)
        term = quote(term)

        url = self.endpoints['url'].format(term)
        html = web.get(url)
        return (html, url)
Code example #8
File: search.py Project: JordanKinsley/PinkiePyBot
def dictionary_search(query, phenny): 
    if hasattr(phenny.config, 'wordnik_api_key'):
        query = query.replace('!', '')
        query = web.quote(query)
        # try the query as given, then lowercased, then capitalized
        for candidate in (query, query.lower(), string.capwords(query)):
            uri = ('http://api.wordnik.com/v4/word.json/' + candidate +
                   '/definitions?limit=1&includeRelated=false&sourceDictionaries=wiktionary'
                   '&useCanonical=false&includeTags=false&api_key=' + phenny.config.wordnik_api_key)
            try:
                rec_bytes = web.get(uri)
                jsonstring = json.loads(rec_bytes)
                dword = jsonstring[0]['word']
                break
            except:
                continue
        else:
            return None
        if dword:
            ddef = jsonstring[0]['text']
            dattr = jsonstring[0]['attributionText']
            dpart = jsonstring[0]['partOfSpeech']
            dpart = dpart.replace('-', ' ')
            dpart = string.capwords(dpart)
            return (dword + ' - ' + dpart + ' - ' + ddef + ' - ' + dattr)
    else:
        return 'Sorry but you need to set your wordnik_api_key in the config file.'
Code example #9
File: osu.py Project: rnewman/jenni
def office(jenni, input):
    try:
        site = web.get("http://opensource.osu.edu/~meinwald/office.php")
    except:
        site = web.get("http://web2/~meinwald/office.php")
    lines = site.split("\n")
    jenni.reply(lines[2])
Code example #10
File: search.py Project: schnuffle/jenni
def duck_api(query):
    '''Send 'query' to DDG's API and return results as a dictionary'''
    query = web.urllib.quote(query)
    uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1&kp=-1' % query
    results = json.loads(web.get(uri))  # fetch once and decode the JSON response
    return results
Code example #11
File: tempo.py Project: jimmyskull/pysweep
 def ShowWeather(self, local, para, nick, todos, detalhes):
     host = 'http://br.weather.com'
     uri = host + '/search/search?what=WeatherLocalUndeclared&where=' + local.replace(' ', '+').lower()
     dados = web.get(uri, True)
     src = dados[0]
     
     if Tag.inicio_conteudo in src:
         self.weather(src, para, dados[1], detalhes)
     else:
         locais = []
         if not (Tag.titulo in src):
             self.Bot.Say(para, self.msg_rand(Frases.nao_encontrado).replace('%cidade', local) )
         else:
             src = src[src.find(Tag.titulo):]
             while (Tag.link_inicio in src):
                 tag = src.find(Tag.link_fim)
                 item = src[src.find(Tag.link_inicio):tag]
                 locais.append(item[:item.find('"')])
                 src = src[tag+len(Tag.link_fim):]
             if len(locais) == 0: return
             if len(locais) == 1: todos = True
             if not todos:
                 self.Bot.Say(para, Frases.varios_resultados % len(locais))
             else:
                 for link in locais:
                     dados = web.get(host + link, True)
                     self.weather(dados[0], para, dados[1], detalhes)
Code example #12
File: head.py Project: ask-compu/CompuBot
def spotify_track(uri, phenny, radio):
    idsplit = uri.split('/')
    if radio is False:
        id = idsplit[4]
    else:
        id = idsplit[5]
    apiuri = 'https://api.spotify.com/v1/tracks/' + id
    try:
        rec_bytes = web.get(apiuri)
    except:
        return
    jsonstring = json.loads(rec_bytes)
    track = jsonstring['name']
    album = jsonstring['album']['name']
    artistarray = jsonstring['artists']
    if len(artistarray) == 1:
        artist = artistarray[0]['name']
    else:
        artist = "Various Artists"
    albumid = jsonstring['album']['id']
    albumurl = 'https://api.spotify.com/v1/albums/' + albumid
    try:
        rec_bytes_album = web.get(albumurl)
        jsonstringalbum = json.loads(rec_bytes_album)
        released = jsonstringalbum['release_date']
    except:
        isdateutil = False
    try:
        import dateutil.parser
        isdateutil = True
        dt = dateutil.parser.parse(released)
        timestamp1 = calendar.timegm(dt.timetuple())
        timestamp1 = time.gmtime(timestamp1)
        precision = jsonstringalbum['release_date_precision']
        if precision == 'day':
            releasedformat = time.strftime('on %A %B %d, %Y', timestamp1)
        elif precision == 'month':
            releasedformat = time.strftime('in %B, %Y', timestamp1)
        elif precision == 'year':
            releasedformat = time.strftime('in %Y', timestamp1)
        else:
            isdateutil = False
    except:
        isdateutil = False
    milliseconds = jsonstring['duration_ms']
    seconds = milliseconds // 1000 % 60  # integer division so a float never rounds up to "60"
    minutes = milliseconds // (1000 * 60) % 60
    tracktime = str(minutes) + ":" + str(seconds).zfill(2)
    if isdateutil is True:
        return '\002\00303,01Spotify\017 ' + track + ' - ' + artist + ' - ' + album + ' - ' + tracktime + ' released ' + releasedformat
    else:
        return '\002\00303,01Spotify\017 ' + track + ' - ' + artist + ' - ' + album + ' - ' + tracktime
Code example #13
File: warnings.py Project: GunioRobot/jenni
def nws_lookup(jenni, input):
    """ Look up weather watches, warnings, and advisories. """
    text = input.group(2)
    if not text:
        return
    bits = text.split(",")
    master_url = False
    if len(bits) == 2:
        ## county given
        url_part1 = "http://alerts.weather.gov"
        state = bits[1].lstrip().rstrip().lower()
        county = bits[0].lstrip().rstrip().lower()
        if state not in states:
            jenni.reply("State not found.")
            return
        url1 = county_list.format(states[state])
        page1 = web.get(url1).split("\n")
        url_part2 = None  # initialized so the check below works when no county matches
        for line in page1:
            mystr = ">" + unicode(county) + "<"
            if mystr in line.lower():
                url_part2 = line[9:36]
                break
        if not url_part2:
            jenni.reply("Could not find county.")
            return
        master_url = url_part1 + url_part2
        location = text
    elif len(bits) == 1:
        ## zip code
        if bits[0]:
            urlz = zip_code_lookup.format(bits[0])
            pagez = web.get(urlz)
            fips = re_fips.findall(pagez)
            if fips:
                state = re_state.findall(pagez)
                if not state:
                    jenni.reply("Could not match ZIP code to a state")
                    return
                location = state[0]
                state = location[-2:]
                fips = unicode(state) + "C" + unicode(fips[0])
                master_url = alerts.format(fips)
            else:
                jenni.reply("ZIP code does not exist.")
                return

    if not master_url:
        jenni.reply("Invalid input. Please enter a ZIP code or a county and state pairing, such as 'Franklin, Ohio'")
        return

    feed = feedparser.parse(master_url)
    for item in feed.entries:
        if nomsg[:51] == item["title"]:
            jenni.reply(nomsg.format(location))
            break
        else:
            jenni.reply(unicode(item["title"]))
            jenni.reply(unicode(item["summary"]))
Code example #14
File: honstringtables.py Project: Theino/pyHoNBot
def setup(bot):
    if True:#not hasattr(bot,'stringtables'):
        bot.stringtable_version = None

    verinfo = bot.masterserver_request({'version' : '0.0.0.0', 'os' : 'lac' ,'arch' : 'x86-biarch'},path = 'patcher/patcher.php')
    verinfo = verinfo[0]
    if bot.stringtable_version == verinfo['version']:
        print("no need to update stringtables")
        return


    manifest = None
    for base in (verinfo['url'], verinfo['url2']):  # fall back to the mirror if the primary fails
        try:
            manifest = web.get('{0}{1}/{2}/{3}/manifest.xml.zip'.format(base, verinfo['os'], verinfo['arch'], verinfo['version']))
            break
        except:
            pass
    if manifest is None:
        print("Couldn't get manifest for hon's files")
        return
    
    bot.stringtables = {}
    manifest = etree.fromstring(zipfile.ZipFile(StringIO.StringIO(manifest)).read('manifest.xml'))
    files = []
    for e in manifest:
        if e.tag == 'file' and e.attrib['path'] in stringtablefiles:
            files.append(e.attrib)
    for f in files:
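        # normalize four-part versions like 1.2.3.0 down to 1.2.3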
        if f['version'].count('.') == 3 and f['version'].endswith('.0'):
            f['version'] = f['version'][:-2]
        table = None
        for base in (verinfo['url'], verinfo['url2']):
            try:
                table = web.get('{0}{1}/{2}/{3}/{4}.zip'.format(base, verinfo['os'], verinfo['arch'], f['version'], f['path']))
                break
            except:
                pass
        if table is None:
            print("Wasn't able to fetch {0}".format(f['path']))
            continue
        table = zipfile.ZipFile(StringIO.StringIO(table)).read(basename(f['path']))
        try:
            table = table.decode("utf8")
        except:
            table = table.decode("cp1251")
        table = table.splitlines()
        for line in table:
            m = re_entry.match(line)
            if m:
                bot.stringtables[m.group(1)] = m.group(2)
    bot.stringtable_version = verinfo['version']
Code example #15
File: geohashing.py Project: relet/pygeohashing
def getdjia(date):
  for crox in croxes:
    try:
      return web.get(crox % date)
    except:
      pass
  try:
    y,m,d = str(date).split("-")
    return web.get("http://irc.peeron.com/xkcd/map/data/%s/%s/%s" % (y,m,d))
  except Exception,e:
    print date, e
    return None
Code example #16
File: apertium_wiki.py Project: Nuruddinjr/phenny
def awik(phenny, input):
   """Search for something on Apertium wiki."""
   origterm = input.groups()[1]

   if not origterm: 
      return phenny.say('Perhaps you meant ".wik Zen"?')
   #origterm = origterm.encode('utf-8')

   term = format_term(origterm)
   
   try:
      html = str(web.get(wikiuri % (term)))
   except:
      apiResponse = json.loads(str(web.get(wikisearchuri % (term, 'title'))))
      if len(apiResponse['query']['search']):
        term = apiResponse['query']['search'][0]['title']
        html = str(web.get(wikiuri % (term)))
      else:
        apiResponse = json.loads(str(web.get(wikisearchuri % (term, 'text'))))
        if len(apiResponse['query']['search']):
          term = apiResponse['query']['search'][0]['title']
          html = str(web.get(wikiuri % (term)))
        else:
          phenny.reply("No wiki results for that term.")
          return
   
   page = lxml.html.fromstring(html)

   if "#" in origterm:
      section = format_subsection(origterm.split("#")[1])
      text = page.find(".//span[@id='%s']" % section)
      if text is None:
         phenny.reply("That subsection does not exist.")
         return
      text = text.getparent().getnext()
   else:
      paragraphs = page.findall('.//p')
      if len(paragraphs) > 2:
        text = page.findall('.//p')[1]
      else:
        text = page.findall(".//*[@id='mw-content-text']")[0]

   sentences = text.text_content().split(". ")
   sentence = '"' + sentences[0] + '"'
 
   maxlength = 430 - len((' - ' + wikiuri % (format_term_display(term))).encode('utf-8'))
   if len(sentence.encode('utf-8')) > maxlength: 
      sentence = sentence.encode('utf-8')[:maxlength].decode('utf-8', 'ignore')
      words = sentence[:-5].split(' ')
      words.pop()
      sentence = ' '.join(words) + ' [...]'

   phenny.say(sentence + ' - ' + wikiuri % (format_term_display(term)))
Code example #17
File: imdb.py Project: mutantmonkey/phenny
def imdb_search(query):
    query = query.replace('!', '')
    query = web.quote(query)
    uri = 'http://imdb.com/find?q=%s' % query
    bytes = web.get(uri)
    m = r_imdb_find.search(bytes)
    if not m: return m
    ID = web.decode(m.group(1))
    uri = 'http://imdb.com/title/%s' % ID
    bytes = web.get(uri)
    bytes = bytes.replace('\n', '')
    info = r_imdb_details.search(bytes)
    info = {'Title': info.group(1), 'Year': info.group(2), 'Plot': info.group(3), 'imdbID': ID}
    return info
Code example #18
File: bikebot.py Project: evanjfraser/bikebot
def strava(phenny, input):
  return phenny.reply("Strava is no worky because of API updates. Until Strava releases their API to the public. No can do.")
  n=input.nick
  r=" lives in "
  l=""
  if not input.group(2):
    fetch=readdbattr(n.lower(),'strava')
    if fetch:
      l=fetch
    else:
      return
  else:
    t = unicodedata.normalize('NFKD', input.group(2)).encode('ascii', 'ignore').split(' ')
    if len(t) >= 1:
      if t[0] == 'set':
        if len(t) < 2:
          return 
        else:
          updatedbattr(n.lower(), 'strava',input.group(2)[4:])
          return phenny.say('Strava set up!')
      else:
        n=t[0]
        fetch=readdbattr(n.lower(),'strava')
        if fetch:
          l=fetch
        else:
          return
  saresp=urllib2.urlopen('http://www.strava.com/api/v1/rides/' + l).read()
  pprint.pprint(saresp)
  erract=False
  if 'error' in saresp:
      erract=True
  else:
      js2resp=json.loads(saresp)
  pprint.pprint(erract)
  sresp=web.get('http://www.strava.com/api/v1/rides?athleteName=' + l);
  pprint.pprint(sresp)
  if erract:
      if 'rides' in sresp:
          jsresp=json.loads(sresp)
          jsrespid=jsresp['rides'][0]
          pprint.pprint(jsrespid)
          s2resp=web.get('http://www.strava.com/api/v1/rides/' + str(jsresp['rides'][0]['id']))
          js2resp=json.loads(s2resp)
          pprint.pprint(js2resp)
      else:
          phenny.reply('something gone wrong tell krisfremen')
          return
  phenny.reply(js2resp['ride']['athlete']['username'] + ' rode ' + "%.2f" % float(js2resp['ride']['distance']/1000)  + ' km ('+ "%.2f" % float((js2resp['ride']['distance']/1000)*0.621371)  +' mi) in ' + str(datetime.timedelta(seconds=js2resp['ride']['elapsedTime'])) + ' (' +str(datetime.timedelta(seconds=js2resp['ride']['movingTime'])) + ' moving) averaging ' + "%.2f" % float(js2resp['ride']['averageSpeed']*3.6) + ' kph (' + "%.2f" % float((js2resp['ride']['averageSpeed']*3.6)*0.621371) + ' mph) climbing ' +  "%.2f" % float((js2resp['ride']['elevationGain'])) + 'm (' + "%.2f" % float((js2resp['ride']['elevationGain'])*3.2808399)  +   ' ft) on ' + time.strftime('%b %d, %Y',time.strptime(js2resp['ride']['startDate'],'%Y-%m-%dT%H:%M:%SZ')) + ' titled ' + js2resp['ride']['name'])
Code example #19
File: tfw.py Project: downquark/phenny
def tfw(phenny, input, fahrenheit=False, celsius=False):
    """.tfw <city/zip> - Show the f*****g weather at the specified location."""

    where = input.group(2)
    if where:
        url = "http://thefuckingweather.com/?where=" + urlquote(where)
    else:
        # quote the location only when one was given; urlquote(None) would raise TypeError
        url = "http://thefuckingweather.com/?random=True"

    if not fahrenheit:
        url += "&unit=c"

    try:
        req = web.get(url)
    except (HTTPError, IOError):
        # the f*****g weather is f*****g unstable, try again
        try:
            req = web.get(url)
        except (HTTPError, IOError):
            raise GrumbleError("THE INTERNET IS F*****G BROKEN. Please try again later.")

    doc = lxml.html.fromstring(req)

    try:
        location = doc.get_element_by_id('locationDisplaySpan').text_content()

        temp_sel = lxml.cssselect.CSSSelector('span.temperature')
        temp = temp_sel(doc)[0].text_content()
        temp = int(temp)
    except (IndexError, KeyError):
        phenny.say("UNKNOWN F*****G LOCATION. Try another?")
        return

    # add units and convert if necessary
    if fahrenheit:
        temp = "{0:d}°F‽".format(temp)
    elif celsius:
        temp = "{0:d}°C‽".format(temp)
    else:
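        # k_B ≈ 8.617343e-5 eV/K: convert °C to kelvin, then express as milli-eV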
        tempev = (temp + 273.15) * 8.617343e-5 * 1000
        temp = "%f meV‽" % tempev
    
    remark_sel = lxml.cssselect.CSSSelector('p.remark')
    remark = remark_sel(doc)[0].text_content()

    flavor_sel = lxml.cssselect.CSSSelector('p.flavor')
    flavor = flavor_sel(doc)[0].text_content()

    response = "%s %s - %s - %s" % (temp, remark, flavor, location)
    phenny.say(response)
Code example #20
File: wuvt.py Project: LordTroan/phenny
def wuvt(phenny, input) :
    try:
        playing = web.get('http://www.wuvt.vt.edu/playlists/latest_track.php')
        djpage = web.get('http://www.wuvt.vt.edu/playlists/current_dj.php')
    except (URLError, HTTPError):
        raise GrumbleError('Cannot connect to wuvt')
    play = r_play.search(playing)
    song = play.group(1)
    artist = play.group(2)
    dj = r_dj.search(djpage).group(1)

    if song and artist:
        phenny.reply('DJ {0} is currently playing: {1} by {2}'.format(dj.strip(),song,artist))
    else:
        phenny.reply('Cannot connect to wuvt')
Code example #21
File: wuvt.py Project: qinmingyue/phenny
def wuvt(phenny, input):
    try:
        playing = web.get("http://www.wuvt.vt.edu/playlists/latest_track.php")
        djpage = web.get("http://www.wuvt.vt.edu/playlists/current_dj.php")
    except:
        raise GrumbleError("Cannot connect to wuvt")
    play = r_play.search(playing)
    song = play.group(2)
    artist = play.group(1)
    dj = r_dj.search(djpage).group(1)

    if song and artist:
        phenny.reply("DJ {0} is currently playing: {1} by {2}".format(dj.strip(), song.strip(), artist.strip()))
    else:
        phenny.reply("Cannot connect to wuvt")
Code example #22
File: calc.py Project: tdunne/caesar
def py(caesar, input): 
   query = input.group(2).encode('utf-8')
   uri = 'http://tumbolia.appspot.com/py/'
   answer = web.get(uri + web.urllib.quote(query))
   if answer: 
      caesar.say(answer)
   else: caesar.reply('Sorry, no result.')
Code example #23
File: dinner.py Project: freeboson/jenni-misc
def fucking_dinner(jenni, input):
    '''.fd -- provide suggestions for dinner'''
    txt = input.group(2)
    url = 'http://www.whatthefuckshouldimakefordinner.com'
    if txt == '-v':
        url = 'http://whatthefuckshouldimakefordinner.com/veg.php'
    page = web.get(url)
    re_mark = re.compile('<dt><a href="(.*?)" target="_blank">(.*?)</a></dt>')
    results = re_mark.findall(page)

    if results:

        dish = results[0][1].upper()
        long_url = results[0][0]

        try:
            short_url = web.urllib.urlopen(metamark_api, 
                            web.urllib.urlencode({'long_url':long_url})).read()
        except:
            short_url = long_url

        jenni.say("WHY DON'T YOU EAT SOME F*****G: " + dish +
                  " HERE IS THE RECIPE: " + short_url)

    else:
        jenni.say("I DON'T F*****G KNOW, EAT PIZZA.")
Code example #24
File: translate.py Project: Kitsueki/jenni
def detect(text): 
    uri = 'http://ajax.googleapis.com/ajax/services/language/detect'
    q = urllib.quote(text)
    bytes = web.get(uri + '?q=' + q + '&v=1.0')
    result = web.json(bytes)
    try: return result['responseData']['language']
    except Exception: return None
Code example #25
File: codepoints.py Project: Jarada/jenni
def load_data():
    all_code = web.get('http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt')
    for line in all_code.split('\n'):
        parts = line.split(';')
        if len(parts) >= 11:  # parts[10] is read below, so 10 fields are not enough
            name = parts[1]
            name_parts = name.split(',')
            name_part_tmp = name_parts[0].replace('<', '')

            ## look for codepoint ranges
            if 'First>' in name:
                if name_part_tmp not in cp_ranges:
                    cp_ranges[name_part_tmp] = list()
                cp_ranges[name_part_tmp].append(parts[0])
            elif 'Last>' in name:
                if name_part_tmp not in cp_ranges:
                    cp_ranges[name_part_tmp] = list()
                cp_ranges[name_part_tmp].append(parts[0])

            if parts[10]:
                name += ' ' + str(parts[10])

            ## remove '<' and '>' from names (usually only on ranges)
            name = name.replace('<', '')
            name = name.replace('>', '')
            cp_names[parts[0]] = name

    ## generate codepoints for ranges found above
    for cp_range in cp_ranges:
        cps = cp_ranges[cp_range]
        start = cps[0]
        end = cps[1]

        for number in xrange(int(start, 16), int(end, 16)):
            cp_names['%04X' % (number)] = cp_range
Code example #26
File: rule34.py Project: ask-compu/CompuBot
def get_boru(phenny, site, tags):
    try:
        req = web.get("https://{0}.net/post/index.json?tags={1}".format(site, urlquoteplus(tags)))
    except (HTTPError, IOError):
        phenny.say("Oopsies, looks like the Internet is broken.")
        return

    results = json.loads(req)  # the encoding kwarg was removed from json.loads in Python 3.9

    if len(results) <= 0:
        phenny.reply("Huh. {0} is missing {1}".format(site, tags))
        return

    try:
        post = choice(results)  # pick one post so the link and image refer to the same result
        link = "https://{0}.net/post/show/{1}/".format(site, post["id"])
        if site == "e621":  # '==' not 'is': identity checks against string literals are unreliable
            image = post["file_url"]
        elif site == "twentypercentcooler":
            image = "https://twentypercentcooler.net" + post["file_url"]
    except AttributeError:
        phenny.say("Oopsies, looks like the Internet is broken.")

    tags = results[0]
    rating = tags["rating"]
    if rating in ("q", "e"):
        link = "!!NSFW!! -> {0} <- !!NSFW!!".format(link)
        image = "!!NSFW!! -> {0} <- !!NSFW!!".format(image)
    return image
Code example #27
File: rule34.py Project: ask-compu/CompuBot
def rule34(phenny, input):
    """.rule34 <query> - Rule 34: If it exists there is p**n of it."""

    if input.nick in phenny.config.user_ignore:
        return

    if check_nsfw(phenny, input.sender, None, input.nick):
        return
    q = input.group(2)
    if not q:
        phenny.say(rule34.__doc__.strip())
        return

    try:
        req = web.get(
            "https://rule34.xxx/index.php?page=dapi&s=post&q=index&tags={0}".format(urlquote(q))
        )  # Lets use XML!
    except (HTTPError, IOError):
        raise GrumbleError("THE INTERNET IS F*****G BROKEN. Please try again later.")

    results = ET.fromstring(req)

    if len(results) <= 0:
        phenny.reply("Huh. rule34.xxx is missing {0}".format(q))
        return

    try:
        link = "https:" + (choice(results).attrib["file_url"])
    except AttributeError:
        raise GrumbleError("THE INTERNET IS BROKEN. Please try again later.")

    response = "!!NSFW!! -> {0} <- !!NSFW!!".format(link)
    phenny.reply(response)
Code example #28
File: calc.py Project: cripperz/jenni
def wa(jenni, input):
    """.wa <input> -- queries WolframAlpha with the given input."""
    if not input.group(2):
        return jenni.reply("No search term.")
    query = input.group(2).encode('utf-8')
    uri = 'https://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.urllib.quote(query.replace('+', '%2B')))
    except timeout as e:
        return jenni.say("Request timd out")
    if answer:
        answer = answer.decode("string_escape")
        answer = HTMLParser.HTMLParser().unescape(answer)
        match = re.search('\\\:([0-9A-Fa-f]{4})', answer)
        if match is not None:
            char_code = match.group(1)
            char = unichr(int(char_code, 16))
            answer = answer.replace('\:' + char_code, char)
        waOutputArray = string.split(answer, ";")
        newOutput = list()
        for each in waOutputArray:
            temp = each.replace('\/', '/')
            newOutput.append(temp)
        waOutputArray = newOutput
        if (len(waOutputArray) < 2):
            jenni.say(answer)
        else:
            jenni.say(waOutputArray[0] + " = " + waOutputArray[1])
        waOutputArray = list()
    else:
        jenni.say('Sorry, no result from WolframAlpha.')
Code example #29
File: rule34.py Project: ask-compu/CompuBot
def derpibooru_search(query, phenny):
    query = query.replace("!", "")
    query = web.quote(query)
    if hasattr(phenny.config, "derpibooru_key"):
        uri = "https://derpibooru.org/search.json?q=" + query + "&key=" + phenny.config.derpibooru_key
    else:
        uri = "https://derpibooru.org/search.json?q=" + query
    rec_bytes = web.get(uri)
    jsonstring = json.loads(rec_bytes)
    dhits = jsonstring["total"]
    if dhits > 0:
        results = choice(jsonstring["search"])
        url = "https:" + results["image"]
        uploader = results["uploader"]
        uploaded = results["created_at"]
        try:
            import dateutil.parser

            isdateutil = True
            dt = dateutil.parser.parse(uploaded)
            timestamp1 = calendar.timegm(dt.timetuple())
            timestamp1 = time.gmtime(timestamp1)
            uploadedformat = time.strftime("%A %B %d, %Y at %I:%M:%S %p", timestamp1)
        except:
            isdateutil = False
        if isdateutil is True:
            return url + " uploaded by " + uploader + " on " + uploadedformat
        else:
            return url + " uploaded by " + uploader
    else:
        return
Code example #30
File: url.py Project: vaicine/chedder
def short(text):
    """
    This function creates a bitly url for each url in the provided "text" string.
    The return type is a list.
    """

    if not bitly_loaded: return [ ]
    bitlys = [ ]
    try:
        a = re.findall(url_finder, text)
        k = len(a)
        i = 0
        while i < k:
            b = str(a[i][0])
            if not b.startswith("http://bit.ly") or not b.startswith("http://j.mp/"):
                # check to see if the url is valid
                try: c = web.head(b)
                except: return [[None, None]]

                url = "http://api.j.mp/v3/shorten?login=%s&apiKey=%s&longUrl=%s&format=txt" % (bitly_user, bitly_api_key, urllib2.quote(b))
                shorter = web.get(url)
                shorter = shorter.strip()  # strip() returns a new string; assign it back
                bitlys.append([b, shorter])
            i += 1
        return bitlys
    except:
        return
Code example #31
def nws_lookup(jenni, input):
    ''' Look up weather watches, warnings, and advisories. '''
    text = input.group(2)
    if not text:
        return jenni.reply('You need to provide some input.')
    bits = text.split(',')
    master_url = False
    if len(bits) == 2:
        ## county given
        county = bits[0]
        state = bits[1]
        url_part1 = 'https://alerts.weather.gov'
        state = (state).strip().lower()
        county = (county).strip().lower()
        reverse_lookup = list()
        if len(state) == 2:
            reverse_lookup = [k for k, v in states.iteritems() if v == state]
            if reverse_lookup:
                state = reverse_lookup[0]
        if state not in states and len(reverse_lookup) < 1:
            jenni.reply('State not found.')
            return
        url1 = county_list.format(states[state])
        page1 = web.get(url1).split('\n')
        prev1 = str()
        prev2 = str()
        url_part2 = str()
        for line in page1:
            mystr = '>' + unicode(county) + '<'
            if mystr in line.lower():
                url_part2 = prev2[9:40]
                break
            prev2 = prev1
            prev1 = line
        if not url_part2:
            return jenni.reply('Could not find county.')
        master_url = 'https://alerts.weather.gov/cap/' + url_part2
        location = text
    elif len(bits) == 1:
        ## zip code
        if bits[0]:
            zip_code = bits[0]
            zips = re_zip.findall(zip_code)
            if not zips:
                return jenni.reply('ZIP is invalid.')
            else:
                try:
                    zip_code = zips[0][0]
                except:
                    return jenni.reply('ZIP could not be validated.')
            urlz = zip_code_lookup.format(zip_code)
            pagez = web.get(urlz)
            fips = re_fips.findall(pagez)
            if fips:
                state = re_state.findall(pagez)
                city = re_city.findall(pagez)
                if not state and not city:
                    return jenni.reply('Could not match ZIP code to a state')
                try:
                    state = state[0].lower()
                    state = states[state].upper()
                    location = city[0] + ', ' + state
                    fips_combo = unicode(state) + 'C' + unicode(fips[0])
                    master_url = alerts.format(fips_combo)
                except:
                    return jenni.reply(
                        'Could not parse state or city from database.')
            else:
                return jenni.reply('ZIP code does not exist.')

    if not master_url:
        return jenni.reply(
            'Invalid input. Please enter a ZIP code or a county and state pairing, such as \'Franklin, Ohio\''
        )

    feed = feedparser.parse(master_url)
    warnings_dict = dict()
    for item in feed.entries:
        if nomsg[:51] == colourize(item['title']):
            return jenni.say(nomsg.format(location))
        else:
            warnings_dict[colourize(unicode(item['title']))] = unicode(
                item['summary'])

    if len(warnings_dict) > 0:
        ## if we have any alerts...
        ## let us sort it so the most recent thing is first, then second, etc...
        warn_keys = warnings_dict.keys()
        find_issue = re.compile('issued (\S+) (\S+) at (\S+):(\S+)(\S)M')
        warn_keys_dt = dict()
        for warn in warn_keys:
            warn_dt = find_issue.findall(warn)
            if len(warn_dt) > 0:
                warn_dt = warn_dt[0]
                month = months[warn_dt[0]]
                day = int(warn_dt[1])
                hour = int(warn_dt[2])
                minute = int(warn_dt[3])
                if warn_dt[-1] == 'P':
                    if hour < 12:
                        hour += 12
                year = datetime.datetime.now().year

                hour -= 1
                warn_keys_dt[warn] = datetime.datetime(year, month, day, hour,
                                                       minute)

        warn_list_dt = sorted(warn_keys_dt, key=warn_keys_dt.get, reverse=True)
        #print 'warn_list_dt', warn_list_dt

        if input.sender.startswith('#') and not (
                input.group(1)).startswith('nws-more'):
            ## if queried in channel
            for key in warn_list_dt:
                jenni.say(key)
            jenni.say(more_info.format(location, master_url))
        else:
            ## if queried in private message
            for key in warn_list_dt:
                jenni.say(key)
                jenni.say(warnings_dict[key])
            jenni.say(more_info.format(location, master_url))
Code example #32
File: southpark.py Project: greg-hellings/jenni
def southparktimes(jenni, input):
    global cache, cachets, maxtitlelen, maxepilen
    tsnow = datetime.now()
    if cache['TIMES'] is not None and cachets[
            'TIMES'] is not None and tsnow - cachets['TIMES'] <= cachetsreset:
        printListings(jenni)
        return

    src = web.get(
        'http://www.comedycentral.com/tv_schedule/index.jhtml?seriesId=11600&forever=please'
    )
    parts = src.split('<div id="tv_schedule_content">')
    cont = parts[1]
    parts = cont.split('<div id="tv_schedule_bottom">')
    cont = parts[0]
    schedule = cont.split('<div class="schedDiv">')
    del schedule[0]

    info = []
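    # cap output at the next five scheduled episodes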
    count = 5
    for s in schedule:
        s = s.replace('\n', ' ')
        s = htmlDecode(s)

        ## gets the date
        sidx = s.index('<div class="schedDateText">')
        send = s.index('</div>', sidx)
        if sidx == -1 or send == -1: break

        m = re.search('>([^<]{2,})$', s[sidx:send])
        if m is None: break

        date = m.group(1).strip()
        sdate = time.strptime(date, '%A %b %d %Y')

        ## get episodes for the s-th date
        tepi = s.split('<td class="mtvn-cal-time"')
        del tepi[0]
        for t in tepi:

            ## gets the schedule time
            sidx = t.find('<nobr>')
            send = t.find('</nobr>', sidx)

            if sidx == -1 or send == -1: break

            stime = t[sidx + 6:send].strip()

            ## gets the schedule episode name
            sidx = t.find('<b>', send)
            send = t.find('</b>', sidx)

            if sidx == -1 or send == -1: break

            stitle = t[sidx + 3:send].strip()
            m = re.search('\(([^)]+)\)$', stitle)
            if m is None: break
            sepi = str(int(m.group(1)))
            if len(sepi) > maxepilen: maxepilen = len(sepi)
            stitle = stitle.replace(m.group(), '')
            lenstitle = len(stitle)
            if lenstitle > maxtitlelen: maxtitlelen = lenstitle

            ## gets the schedule episode desc
            sidx = send
            send = t.find('</span>', sidx)

            if send == -1: break

            m = re.search('>([^<]{2,})$', t[sidx:send])

            if m is None: break

            sdesc = m.group(1).strip()

            info.append([sdate, sepi, stitle, stime])

            count -= 1
            if count == 0: break
        if count == 0: break
    cache['TIMES'] = info
    cachets['TIMES'] = tsnow
    printListings(jenni)
Code example #33
def scrape_wiki_zones():
    data = {}
    url = 'http://en.wikipedia.org/wiki/List_of_time_zone_abbreviations'
    resp = web.get(url)
    h = html.document_fromstring(resp)
    table = h.find_class('wikitable')[0]
    for row in table.findall('tr')[1:]:
        code = row.findall('td')[0].text
        offset = row.findall('td')[2].find('a').text[3:]
        offset = offset.replace('−', '-')  # replacing minus sign with hyphen
        if offset.find(':') > 0:
            offset = int(offset.split(':')[0]) + int(offset.split(':')[1]) / 60
        else:
            if offset == '':
                offset = 0
            offset = offset.strip('±')
            offset = int(offset)
        data[code] = offset

    #this is now broken
    #file_url = "http://www.citytimezones.info/database/cities_csv.zip"
    #file_name = "cities_csv.zip"
    #
    #with urllib.request.urlopen(file_url) as response, open(file_name, 'wb') as out_file:
    #    shutil.copyfileobj(response, out_file)
    #    with zipfile.ZipFile(file_name) as zf:
    #        a = zf.extractall()
    #        print(zf)
    #
    #with open("cities.txt", "rt", encoding="UTF8") as csvfile:
    #    csvreader = csv.reader(csvfile)
    #    for row in csvreader:
    #        tmr = 0
    #        for elem in row:
    #            tmr=tmr+1
    #            if tmr==1:
    #                ctz=elem
    #            elif tmr==2:
    #                if re.match("\(GMT\)", elem):
    #                    ctu="+00:00"
    #                else:
    #                    r = re.compile("\(GMT([+-]*\d*:\d*)\)")
    #                    m = r.match(elem)
    #                    if m.group(1) != None:
    #                        ctu = m.group(1)
    #                    else:
    #                        return
    #                if ctu[ctu.find(':')+1]=='0':
    #                    ctu=ctu[:ctu.find(':')]
    #                else:
    #                    ctu=ctu[:ctu.find(':')]+'.5'
    #                if ctu[0]=='−':
    #                    ctu='-'+ctu[1:]
    #                data[ctz.upper()]=float(ctu)
    #            else:
    #                break
    url = 'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
    resp = web.get(url)
    h = html.document_fromstring(resp)
    table = h.find_class('wikitable')[0]
    for trs in table.findall('tr'):
        tmr = 0
        for tds in trs.findall('td'):
            tmr = tmr + 1
            if tmr == 3:
                ctz = tds.find('a').text[tds.find('a').text.find('/') +
                                         1:].replace('_', ' ')
                if ctz.find('/') != -1:
                    ctz = ctz[ctz.find('/') + 1:]
            if tmr == 5:
                ctuFind = tds.find('a')
                if ctuFind is not None:
                    ctu = ctuFind.text
                    if ctu[ctu.find(':') + 1] == '0':
                        ctu = ctu[:ctu.find(':')]
                    else:
                        ctu = ctu[:ctu.find(':')] + '.5'
                    if ctu[0] == '−':
                        ctu = '-' + ctu[1:]
                    data[ctz.upper()] = float(ctu)

    return data
Code example #34
import web  # imports the module with the get function

a = ""  # it wanted a to be defined as something
a = web.get(
    "https://5hyqtreww2.execute-api.eu-north-1.amazonaws.com/artists/"
)  # example URL for exercise 6.3; get returns a value that we assign to a
print(a)
Code example #35
File: head.py Project: vikramkashyap/phenny
def gettitle(phenny, input, uri):
    if "posted" in phenny.variables:
        from modules.posted import check_posted

    if ':' not in uri:
        uri = 'http://' + uri
    uri = uri.replace('#!', '?_escaped_fragment_=')
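    # AJAX-crawling convention: hashbang fragments are served at ?_escaped_fragment_=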

    if uri.startswith('http://wiki.apertium.org/wiki/'):
        item = uri[len('http://wiki.apertium.org/wiki/'):]
        check_posted(phenny, input, uri)
        return awik(phenny, re.match(r'(blahblah)?(.*)', item))
    if re.match(r'https?://en.wiktionary.org/wiki/(.*)', uri):
        check_posted(phenny, input, uri)
        item = re.match(r'https?://en.wiktionary.org/wiki/(.*)', uri).group(1)
        return w(phenny, re.match(r'(blahblah)?(.*)', web.unquote(item)))
    if re.match(r'https?://([a-z]{2,3}).wikipedia.org/wiki/(.*)', uri):
        match = re.match(r'https?://([a-z]{2,3}).wikipedia.org/wiki/(.*)', uri)
        lang, page = match.group(1), match.group(2)
        return wikipedia(phenny, input, page, lang)

    parts = uri.split(".")
    start = parts[0]
    parts.pop(0)
    uri = start + "." + web.quote('.'.join(parts), safe='/#')

    title = None
    localhost = [
        'http://localhost/',
        'http://localhost:80/',
        'http://localhost:8080/',
        'http://127.0.0.1/',
        'http://127.0.0.1:80/',
        'http://127.0.0.1:8080/',
        'https://localhost/',
        'https://localhost:80/',
        'https://localhost:8080/',
        'https://127.0.0.1/',
        'https://127.0.0.1:80/',
        'https://127.0.0.1:8080/',
        'http://localhost:',
        'https://localhost:',
    ]
    for s in localhost:
        if uri.startswith(s):
            return  #phenny.reply('Sorry, access forbidden.')

    if not hasattr(phenny.config, 'blacklisted_urls'):
        phenny.config.blacklisted_urls = []
    if not hasattr(phenny.bot, 'blacklisted_urls'):
        phenny.bot.blacklisted_urls = []
        for s in phenny.config.blacklisted_urls:
            phenny.bot.blacklisted_urls.append(re.compile(s))
    for regex in phenny.bot.blacklisted_urls:
        if regex.match(uri):
            return

    try:
        redirects = 0
        while True:
            try:
                info = web.head(uri)

                if not isinstance(info, list):
                    status = '200'
                else:
                    status = str(info[1])
                    info = info[0]
            except web.HTTPError:
                try:
                    info = requests.get(uri,
                                        headers=web.default_headers,
                                        verify=True)
                    status = str(info.status_code)
                    info = info.headers
                except web.HTTPError:
                    return None

            if status.startswith('3'):
                uri = urllib.parse.urljoin(uri, info['Location'])
            else:
                break

            redirects += 1
            if redirects >= 25:
                return None

        try:
            mtype = info['content-type']
        except:
            return None

        if not mtype or not (('/html' in mtype) or ('/xhtml' in mtype)):
            return None

        try:
            bytes = web.get(uri)
        except:
            return None
        #bytes = u.read(262144)
        #u.close()

    except:
        return

    m = r_title.search(bytes)
    if m:
        title = m.group(1)
        title = title.strip()
        title = title.replace('\t', ' ')
        title = title.replace('\r', ' ')
        title = title.replace('\n', ' ')
        while '  ' in title:
            title = title.replace('  ', ' ')
        if len(title) > 200:
            title = title[:200] + '[...]'

        def e(m):
            entity = m.group(0)
            if entity.startswith('&#x'):
                cp = int(entity[3:-1], 16)
                return chr(cp)
            elif entity.startswith('&#'):
                cp = int(entity[2:-1])
                return chr(cp)
            else:
                char = name2codepoint[entity[1:-1]]
                return chr(char)

        title = r_entity.sub(e, title)

        if title:
            title = title.replace('\n', '')
            title = title.replace('\r', '')
            title = "[ {0} ]".format(title)

            posted = check_posted(phenny, input, uri)

            if posted:
                title = "{0} (posted: {1})".format(title, posted)

        else:
            title = None
    return title
Code example #36
def tasteometer(phenny, input):
    input1 = input.group(2)
    if not input1:
        phenny.say("tasteometer: compares two users' musical taste")
        phenny.say("syntax: .taste user1 user2")
        return
    input2 = input.group(3)
    user1 = resolve_username(input1)
    if not user1:
        user1 = input1
    user2 = resolve_username(input2)
    if not user2:
        user2 = input2
    if not user2:
        user2 = resolve_username(input.nick)
        if not user2:
            user2 = input.nick
    try:
        req = web.get(
            "%smethod=tasteometer.compare&type1=user&type2=user&value1=%s&value2=%s"
            % (APIURL, web.quote(user1), web.quote(user2)))
    except web.HTTPError as e:
        if e.response.status_code == 400:
            phenny.say(
                "uhoh, someone doesn't exist on last.fm, perhaps they need to set user"
            )
            return
        else:
            phenny.say("uhoh. try again later, mmkay?")
            return
    root = etree.fromstring(req.encode('utf-8'))
    score = root.xpath('comparison/result/score')
    if len(score) == 0:
        phenny.say("something isn't right. have those users scrobbled?")
        return

    score = float(score[0].text)
    rating = ""
    if score >= 0.9:
        rating = "Super"
    elif score >= 0.7:
        rating = "Very High"
    elif score >= 0.5:
        rating = "High"
    elif score >= 0.3:
        rating = "Medium"
    elif score >= 0.1:
        rating = "Low"
    else:
        rating = "Very Low"

    artists = root.xpath("comparison/result/artists/artist/name")
    common_artists = ""
    names = []
    if len(artists) == 0:
        common_artists = ". they don't have any artists in common."
    else:
        list(map(lambda a: names.append(a.text), artists))
        common_artists = "and music they have in common includes: %s" % ", ".join(
            names)

    phenny.say("%s's and %s's musical compatibility rating is %s %s" %
               (user1, user2, rating, common_artists))
Code example #37
File: weather.py Project: lawrencehjf/jenni
def forecastio_current_weather(jenni, input):
    if not hasattr(jenni.config, 'forecastio_apikey'):
        return jenni.say(
            'Please sign up for a forecast.io API key at https://forecast.io/')

    txt = input.group(2)
    if not txt:
        return jenni.say('Please provide a location.')

    #name, county, region, countryName, lat, lng = location(txt)
    name, lat, lng = location(txt)
    if name == 'ImportError' and not lat and not lng:
        return install_geopy

    url = 'https://api.forecast.io/forecast/%s/%s,%s'

    url = url % (jenni.config.forecastio_apikey, urllib.quote(lat),
                 urllib.quote(lng))

    ## do some error checking
    try:
        ## if the Internet is working this should work, \o/
        page = web.get(url)
    except:
        ## well, crap, check your Internet, and if you can access forecast.io
        return jenni.say('Could not access https://api.forecast.io/')

    try:
        ## we want tasty JSON
        data = json.loads(page)
    except:
        ## that wasn't tasty
        return jenni.say(
            'The server did not return anything that was readable as JSON.')

    if 'currently' not in data:
        ## doesn't happen unless the GPS coords are completely bonkers
        return jenni.say(
            'No information obtained from forecast.io for the given location: '
            '%s (%s,%s)' % (name, lat, lng))

    ## let the fun begin!!

    today = data['currently']

    cond = today['icon']  ## 0.17
    cover = today['cloudCover']
    temp = today['temperature']
    dew = today['dewPoint']
    pressure = today['pressure']
    speed = today['windSpeed']
    degrees = today['windBearing']
    humidity = today['humidity']
    APtemp = today['apparentTemperature']

    ## this code differs from the corresponding section above,
    ## as forecast.io uses more precise measurements than NOAA
    if cover >= 0.8:
        cover_word = 'Overcast'
    elif cover >= 0.5:
        cover_word = 'Cloudy'
    elif cover >= 0.2:
        cover_word = 'Scattered'
    else:
        cover_word = 'Clear'

    temp_c = (temp - 32) / 1.8
    temp = u'%.1f\u00B0F (%.1f\u00B0C)'.encode('utf-8') % (temp, temp_c)

    dew_c = (dew - 32) / 1.8
    dew = u'%.1f\u00B0F (%.1f\u00B0C)'.encode('utf-8') % (dew, dew_c)

    APtemp_c = (APtemp - 32) / 1.8
    APtemp = u'%.1f\u00B0F (%.1f\u00B0C)'.encode('utf-8') % (APtemp, APtemp_c)

    humidity = str(int(float(humidity) * 100)) + '%'

    pressure = '%.2fin (%.2fmb)' % (pressure * 0.0295301, pressure)
    cond = cond.replace('-', ' ')
    cond = cond.title()

    description = speed_desc(speed)

    degrees = wind_dir(degrees)

    ## value taken from, https://is.gd/3dNrbW
    speedC = 1.609344 * speed
    wind = '%s %.1fmph (%.1fkmh) (%s)' % (description, speed, speedC, degrees)

    ## ISO-8601 ftw, https://xkcd.com/1179/
    time = datetime.datetime.fromtimestamp(int(
        today['time'])).strftime('%Y-%m-%d %H:%M:%S')

    ## build output string.
    ## a bit messy, but better than other alternatives
    output = str()
    output += '\x1FCover\x1F: ' + cover_word
    output += ', \x1FTemp\x1F: ' + str(temp)
    output += ', \x1FDew Point\x1F: ' + str(dew)
    output += ', \x1FHumidity\x1F: ' + str(humidity)
    output += ', \x1FApparent Temp\x1F: ' + str(APtemp)
    output += ', \x1FPressure\x1F: ' + pressure
    if cond:
        output += ', \x1FCondition\x1F: ' + (cond).encode('utf-8')
    output += ', \x1FWind\x1F: ' + wind
    output += ' - '
    output += uc.encode(name)
    output += '; %s UTC' % (time)  # '+=' so the timestamp is actually appended

    ## required according to ToS by forecast.io
    output += ' (Powered by Forecast, forecast.io)'
    jenni.say(output)
Code example #38
File: weather.py Project: NeoMahler/jenni
def forecast_wg(jenni, input):
    if not hasattr(jenni.config, 'wunderground_apikey'):
        return jenni.say('Please sign up for a wunderground.com API key at http://www.wunderground.com/weather/api/ or try .wx-noaa or .weather-noaa')

    apikey = jenni.config.wunderground_apikey

    url = 'https://api.wunderground.com/api/%s/forecast10day/geolookup/q/%s.json'

    txt = input.group(2)
    if not txt:
        return jenni.say('No input provided. Please provide a location.')

    url_new = url % (apikey, txt)

    try:
        page = web.get(url_new)
    except:
        return jenni.say("We could not access wunderground.com's API at the moment.")

    try:
        useful = json.loads(page)
    except:
        return jenni.say('We could not obtain useful information from the wunderground.com API.')

    if 'response' in useful and 'error' in useful['response'] and 'description' in useful['response']['error']:
        return jenni.say(str(useful['response']['error']['description']))


    days = useful['forecast']['simpleforecast']['forecastday']
    forecast_text = useful['forecast']['txt_forecast']['forecastday']

    days_text = list()

    for each in forecast_text:
        txt = each['fcttext']
        temp = txt.split('. Low')
        temp = temp[0]

        temp = temp.split('. High')
        temp = temp[0]

        days_text.append(temp)

    city = useful['location']['city']

    region = str()
    if 'state' in useful['location']:
        region += useful['location']['state']

    country = useful['location']['country_name']

    def preface_location(ci, reg, cty):
        out = str()
        out += '[' + ci
        if reg:
            out += ', ' + reg
        out += ', %s] ' % (cty)
        return out

    output = preface_location(city, region, country)
    output_second = preface_location(city, region, country)

    k = 0
    for day in days:
        day_of_week = day['date']['weekday_short']
        conditions = day['conditions']
        highs = u'\x02\x0304%s\u00B0F (%s\u00B0C)\x03\x02' % (day['high']['fahrenheit'], day['high']['celsius'])
        lows = u'\x02\x0302%s\u00B0F (%s\u00B0C)\x03\x02' % (day['low']['fahrenheit'], day['low']['celsius'])
        #wind = 'From %s at %s-mph (%s-kph)' % (day['avewind']['dir'], day['maxwind']['mph'], day['maxwind']['kph'])

        temp = '\x02\x0310%s\x03\x02: %s / %s, \x1FConditions\x1F: %s. Eve: %s | ' % (day_of_week, highs, lows, days_text[k], days_text[k + 1])

        k += 1
        if k <= 2:
            output += temp
        elif 4 >= k > 2:
            output_second += temp

    output = output[:-3]
    output_second = output_second[:-3]

    jenni.say(output)
    jenni.say(output_second)
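
The \x02, \x03NN and \x1F bytes woven through the output above are mIRC formatting control codes (bold, colour, underline). A small sketch of helpers for the same codes (hypothetical names, not part of the snippet):

def bold(text):
    return '\x02%s\x02' % text

def color(text, code):
    # \x03 starts a colour and two digits pick it (04 red, 02 blue,
    # 10 teal in the snippet above); a bare \x03 switches it off again
    return '\x03%02d%s\x03' % (code, text)

def underline(text):
    return '\x1F%s\x1F' % text
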
Code example #39
0
File: weather.py Project: NeoMahler/jenni
def location(name):
    name = urllib.quote(name.encode('utf-8'))
    uri = "http://www.geonames.org/search.html?q=%s" % (name)
    in_usa = False
    if re.match(r'\d{5}', name):
        uri += '&country=us'
        in_usa = True
    page = web.get(uri)

    line = re_line.findall(page)
    if not line:
        return ('?', '?', '?', '?', '?', '?',)
    line = line[0]

    find_lat = re_lat.findall(line)

    find_lng = re_long.findall(line)

    find_cnty = re_cnty.findall(line)

    find_city = re_city.findall(line)

    find_region = re_region.findall(line)

    find_county = re_county.findall(line)


    name = '?'
    countryName = '?'
    lng = '0'
    lat = '0'
    region = '?'
    county = '?'


    if find_city:
        name = clean(find_city[0])
    if find_cnty:
        countryName = clean(find_cnty[0])
    if find_lat:
        lat = clean(find_lat[0])
    if find_lng:
        lng = clean(find_lng[0])
    if find_region:
        region = clean(find_region[0])
    if find_county:
        county = clean(find_county[0])

    if in_usa:
        re_columns = re.compile('<td.*?>(.*?)</td>')
        columns = re_columns.findall(line)
        if lat == '0' and lng == '0':
            gps_clean = clean(columns[-1])
            gps = gps_clean.split('/')
            lat = clean(gps[0]).replace('&nbsp;', '')
            lng = clean(gps[1]).replace('&nbsp;', '')
        if name == '?':
            name = clean(columns[0])
        if countryName == '?':
            countryName = clean(columns[2])
        if region == '?':
            region = clean(columns[3])

    #print 'name', name
    #print 'county', county
    #print 'region', region
    #print 'countryName', countryName
    #print 'lat', lat
    #print 'lng', lng
    #print ''

    return name, county, region, countryName, lat, lng
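
The location() helper above scrapes geonames.org search HTML with regexes, which breaks whenever the markup changes. GeoNames also exposes a documented JSON search endpoint; a minimal Python 3 sketch, assuming a registered GeoNames username (the county field is not part of the basic search response, so it stays '?'):

import json
import urllib.parse
import urllib.request

def location_json(name, username='your_geonames_user'):
    # searchJSON takes the same free-text query; maxRows=1 keeps only the
    # best match. 'username' must be a registered GeoNames account.
    uri = ('http://api.geonames.org/searchJSON?q=%s&maxRows=1&username=%s'
           % (urllib.parse.quote(name), username))
    with urllib.request.urlopen(uri) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    if not data.get('geonames'):
        return ('?', '?', '?', '?', '0', '0')
    g = data['geonames'][0]
    return (g.get('name', '?'), '?', g.get('adminName1', '?'),
            g.get('countryName', '?'), str(g.get('lat', '0')),
            str(g.get('lng', '0')))
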
Code example #40
0
File: units.py Project: prodigeni/jenni
def btc_page():
    try:
        page = web.get('https://api.bitcoincharts.com/v1/markets.json')
    except Exception, e:
        print time.time(), e
        return False, 'Failed to reach bitcoincharts.com'
Code example #41
0
File: units.py Project: prodigeni/jenni
def btc_coinbase_page():
    try:
        page = web.get('https://coinbase.com/api/v1/currencies/exchange_rates')
    except Exception, e:
        print time.time(), e
        return False, 'Failed to reach coinbase.com'
Code example #42
0
def tfw(phenny, input, fahrenheit=False, celsius=False, mev=False):
    """.tfw <city/zip> - Show the f*****g weather at the specified location."""

    where = input.group(2)
    if not where:
        # default to Blacksburg, VA
        icao_code = "KBCB"
    else:
        icao_code = weather.code(phenny, where)

    if not icao_code:
        phenny.say("WHERE THE F**K IS THAT? Try another location.")
        return

    uri = 'http://weather.noaa.gov/pub/data/observations/metar/stations/%s.TXT'
    try:
        bytes = web.get(uri % icao_code)
    except AttributeError:
        raise GrumbleError(
            "THE INTERNET IS F*****G BROKEN. Please try again later.")
    except web.HTTPError:
        phenny.say("WHERE THE F**K IS THAT? Try another location.")
        return

    if 'Not Found' in bytes:
        phenny.say("WHERE THE F**K IS THAT? Try another location.")
        return

    w = metar.parse(bytes)
    tempf = w.temperature * 9 / 5 + 32

    # add units and convert if necessary
    if fahrenheit:
        temp = "{0:d}°F‽".format(int(tempf))
    elif celsius:
        temp = "{0:d}°C‽".format(w.temperature)
    else:
        tempev = (w.temperature + 273.15) * 8.6173324e-5 / 2
        if mev:
            temp = "{0:f} meV‽".format(tempev * 1000)
        else:
            temp = "{0:f} Meters‽".format(tempev * 12.39842)

    if w.temperature < 6:
        remark = "IT'S F*****G COLD"
        flavors = [
            "Where's the cat? Oh shit. Fluffy's frozen.",
            "Nothing a few shots couldn't fix", "Should have gone south",
            "You think this is cold? Have you been to upstate New York?",
            "Why do I live here?", "wang icicles.",
            "Freezing my balls off out here", "F**k this place.",
            "GREAT! If you're a penguin.", "Fresh off the tap.",
            "Fantastic do-nothing weather.", "Put on some f*****g socks.",
            "Blue balls x 2",
            "Good news, food won't spoil nearly as fast outside. Bad news, who cares?",
            "Really?", "Wear a f*****g jacket.",
            "I hear Siberia is the same this time of year.",
            "NOT F*****G JOGGING WEATHER", "Shrinkage's best friend.",
            "Warmer than Hoth.", "Good baby making weather.",
            "Where's a Tauntaun when you need one?",
            "My nipples could cut glass", "Global Warming? Bullshit.",
            "Call your local travel agency and ask them if they're serious.",
            "Freezing my balls off IN here",
            "I'm not sure how you can stand it", "I'm sorry.",
            "Even penguins are wearing jackets.",
            "Keep track of your local old people.",
            "WHAT THE F**K DO YOU MEAN IT'S NICER IN ALASKA?",
            "Sock warmers are go. Everywhere.",
            "Why does my car feel like a pair of ice skates?",
            "Actually, a sharp-stick in the eye might not all be that bad right now.",
            "THO Season.", "It's a tit-bit nipplie.",
            "Anything wooden will make a good fireplace. Thank us later.",
            "MOVE THE F**K ON GOLDILOCKS",
            "I'm defrosting inside of my freezer.",
            "It's time for a vacation.",
            "It's bone chilling cold out. Sorry ladies."
        ]
    elif w.temperature < 20:
        remark = "IT'S F*****G...ALRIGHT"
        flavors = [
            "Might as well rain, I'm not going out in that.",
            "Better than a sharp stick in the eye.",
            "Everything's nice butter weather!",
            "At least you aren't living in a small town in Alaska",
            "It could be worse.", "F*****G NOTHING TO SEE HERE",
            "Listen, weather. We need to have a talk.",
            "OH NO. THE WEATHER MACHINE IS BROKEN.",
            "An Eskimo would beat your ass to be here",
            "Where life is mediocre",
            "Can't complain about today, but I want to!",
            "Maybe inviting the inlaws over will improve today.",
            "Let's go to the beach! In three months when it's nice again...",
            "From inside it looks nice out.", "WHAT THE F**K EVER",
            "I love keeping the heat on for this long.",
            "Inside or outside? Either way it's still today.",
            "It's either only going to get better or worse from here!",
            "If it's raining cats and dogs, hope you're not a pet person.",
            "Today makes warm showers way nicer.",
            "Here's to making your blankets feel useful.",
            "I've seen better days",
            "Compared to how awful it's been this is great!",
            "If we go running maybe we won't notice.",
            "Is that the sun outside? Why isn't it doing anything?",
            "Well, at least we're not in prison.",
            "Slap me around and call me Sally. It'd be an improvement.",
            "Today is the perfect size, really honey.",
            "Maybe Jersey Shore is on tonight."
        ]
    elif w.temperature < 27:
        remark = "IT'S F*****G NICE"
        flavors = [
            "I made today breakfast in bed.", "F*****G SWEET",
            "Quit your bitching", "Enjoy.", "IT'S ABOUT F*****G TIME",
            "READ A F****N' BOOK", "LETS HAVE A F*****G PICNIC",
            "It is safe to take your ball-mittens off.", "More please.",
            "uh, can we trade?", "WOO, Spring Break!",
            "I can't believe it's not p**n!", "I approve of this message!",
            "Operation beach volleyball is go.", "Plucky ducky kinda day.",
            "Today called just to say \"Hi.\"",
            "STOP AND SMELL THE F*****G ROSES",
            "F*****G NOTHING WRONG WITH TODAY", "LETS HAVE A F*****G SOIREE",
            "What would you do for a holyshititsniceout bar?",
            "There are no rules today, blow shit up!",
            "Celebrate Today's Day and buy your Today a present so it knows you care.",
            "I feel bad about playing on my computer all day.",
            "Party in the woods.", "It is now safe to leave your home.",
            "PUT A F*****G CAPE ON TODAY, BECAUSE IT'S SUPER",
            "Today is like \"ice\" if it started with an \"n\". F**k you, we don't mean nce.",
            "Water park! Water drive! Just get wet!",
            "The geese are on their way back! Unless you live where they migrate to for the winter.",
            "F*****G AFFABLE AS SHIT", "Give the sun a raise!",
            "Today is better than an original holographic Charizard. Loser!"
        ]
    else:
        remark = "IT'S F*****G HOT"
        flavors = [
            "Do you have life insurance?",
            "Like super models, IT'S TOO F*****G HOT.",
            "Not even PAM can make me not stick to this seat", "SWIMMIN HOLE!",
            "Time to crank the AC.",
            "THE F*****G EQUATOR CALLED, AND IT'S JEALOUS.",
            "Looked in the fridge this morning for some eggs. They're already cooked.",
            "Keeping the AC business in business.",
            "I burned my feet walking on grass.",
            "times you wish you didn't have leather seats",
            "Isn't the desert nice this time of year?",
            "Why, oh why did we decide to live in an oven?",
            "It's hotter outside than my fever.",
            "I recommend staying away from fat people.", "TAKE IT OFF!",
            "Even your frigid girlfriend can't save you from today.",
            "I need gloves to touch the steering wheel.",
            "Lock up yo' ice cream trucks, lock up yo' wife.",
            "F*****G SUNBURNED, AND I WAS INSIDE ALL DAY.",
            "F**k this shit, I'm moving back to Alaska."
        ]

    if w.descriptor == "thunderstorm":
        remark += " AND THUNDERING"
    elif w.precipitation in ("snow", "snow grains"):
        remark += " AND SNOWING"
    elif w.precipitation in ("drizzle", "rain", "unknown precipitation"):
        remark += " AND WET"
    elif w.precipitation in ("ice crystals", "ice pellets"):
        remark += " AND ICY"
    elif w.precipitation in ("hail", "small hail"):
        remark += " AND HAILING"

    if int(tempf) == 69:
        remark = "IT'S F*****G SEXY TIME"
        flavors = [
            "Why is 77 better than 69? You get eight more.",
            "What comes after 69? Mouthwash.",
            "If you are given two contradictory orders, obey them both.",
            "a good f****n' time! ;)",
            "What's the square root of 69? Eight something."
        ]

    flavor = random.choice(flavors)

    response = "{temp} {remark} - {flavor} - {location} {time}Z".format(
        temp=temp,
        remark=remark,
        flavor=flavor,
        location=w.station,
        time=w.time.strftime("%H:%M"))
    phenny.say(response)
Code example #43
0
File: wikipedia.py Project: philphilphil/philly
def wikipedia(term, language='en', last=False):
    global wikiuri
    if not '%' in term:
        if isinstance(term, str):
            t = term.encode('utf-8')
        else:
            t = term
        q = urllib.parse.quote(t)
        u = wikiuri % (language, q)
        bytes = web.get(u)
    else:
        bytes = web.get(wikiuri % (language, term))

    if bytes.startswith('\x1f\x8b\x08\x00\x00\x00\x00\x00'):
        f = io.StringIO(bytes)
        f.seek(0)
        gzip_file = gzip.GzipFile(fileobj=f)
        bytes = gzip_file.read()
        gzip_file.close()
        f.close()

    bytes = r_tr.sub('', bytes)

    if not last:
        r = r_redirect.search(bytes[:4096])
        if r:
            term = urllib.parse.unquote(r.group(1))
            return wikipedia(term, language=language, last=True)

    paragraphs = r_paragraph.findall(bytes)

    if not paragraphs:
        if not last:
            term = search(term)
            return wikipedia(term, language=language, last=True)
        return None

    # Pre-process
    paragraphs = [
        para for para in paragraphs
        if (para and 'technical limitations' not in para
            and 'window.showTocToggle' not in para and 'Deletion_policy' not in
            para and 'Template:AfD_footer' not in para
            and not (para.startswith('<p><i>') and para.endswith('</i></p>'))
            and not 'disambiguation)"' in para) and not '(images and media)' in
        para and not 'This article contains a' in para
        and not 'id="coordinates"' in para and not 'class="thumb' in para
    ]
    # and not 'style="display:none"' in para]

    for i, para in enumerate(paragraphs):
        para = para.replace('<sup>', '|')
        para = para.replace('</sup>', '|')
        paragraphs[i] = text(para).strip()

    # Post-process
    paragraphs = [
        para for para in paragraphs
        if (para and not (para.endswith(':') and len(para) < 150))
    ]

    para = text(paragraphs[0])
    m = r_sentence.match(para)

    if not m:
        if not last:
            term = search(term)
            return wikipedia(term, language=language, last=True)
        return None
    sentence = m.group(0)

    maxlength = 275
    if len(sentence) > maxlength:
        sentence = sentence[:maxlength]
        words = sentence[:-5].split(' ')
        words.pop()
        sentence = ' '.join(words) + ' [...]'

    if (('using the Article Wizard if you wish' in sentence)
            or ('or add a request for it' in sentence)
            or ('in existing articles' in sentence)):
        if not last:
            term = search(term)
            return wikipedia(term, language=language, last=True)
        return None

    sentence = '"' + sentence.replace('"', "'") + '"'
    return sentence + ' - ' + (wikiuri % (language, term))
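
One wrinkle in the snippet above: the gzip branch detects the \x1f\x8b magic header but routes the payload through io.StringIO, which only works on Python 2 where web.get returns a byte string. A sketch of the same check written for Python 3 bytes:

import gzip
import io

def maybe_gunzip(raw):
    # every gzip stream begins with the magic bytes 0x1f 0x8b
    if isinstance(raw, bytes) and raw[:2] == b'\x1f\x8b':
        return gzip.GzipFile(fileobj=io.BytesIO(raw)).read()
    return raw
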
Code example #44
0
File: weather.py Project: lawrencehjf/jenni
def weather_wunderground(jenni, input):
    if not hasattr(jenni.config, 'wunderground_apikey'):
        return jenni.say(
            'Please sign up for a wunderground.com API key at http://www.wunderground.com/weather/api/ or try .wx-noaa or .weather-noaa'
        )

    apikey = jenni.config.wunderground_apikey

    url = 'http://api.wunderground.com/api/%s/conditions/geolookup/q/%s.json'
    txt = input.group(2)
    if not txt:
        return jenni.say('No input provided. Please provide a location.')

    txt = txt.encode('utf-8')

    url_new = url % (apikey, urllib.quote(txt))

    try:
        page = web.get(url_new)
    except:
        return jenni.say(
            "We could not access wunderground.com's API at the moment.")

    useful = False

    try:
        useful = json.loads(page)
    except:
        return jenni.say(
            'We could not obtain useful information from the wunderground.com API.'
        )

    if 'response' in useful and 'error' in useful[
            'response'] and 'description' in useful['response']['error']:
        return jenni.say(str(useful['response']['error']['description']))

    if 'current_observation' not in useful:
        return jenni.say('No observations currently found.')
    current = useful['current_observation']

    condition = current['weather']
    temp = current['temperature_string']
    wind_kph = current['wind_kph']
    wind_mph = current['wind_mph']
    wind_dir = current['wind_dir']
    feelslike = current['feelslike_string']
    dewpoint = current['dewpoint_string']
    location = current['observation_location']['full']
    time_updated = current['observation_time']
    precip_today = current['precip_today_string']
    #precip_1hr = current['precip_1hr_string']
    rh = current['relative_humidity']
    pressure_in = current['pressure_in']
    pressure_mb = current['pressure_mb']
    pressure_trend = current['pressure_trend']

    if pressure_trend == '-':
        #pressure_trend = re.sub('-', u'\u2193', pressure_trend)
        pressure_trend = u'\u2193'
    elif pressure_trend == '+':
        #pressure_trend = re.sub('\+', u'\u2191', pressure_trend)
        pressure_trend = u'\u2191'
    elif pressure_trend == '0':
        pressure_trend = u'\u2192'

    time_updated = re.sub('Last Updated on ', '\x1FLast Updated\x1F: ',
                          time_updated)

    output = str()
    output += '\x1FCover\x1F: ' + condition
    output += ', \x1FTemp\x1F: ' + add_degree(temp)
    output += ', \x1FDew Point\x1F: ' + add_degree(dewpoint)
    output += ', \x1FHumidity\x1F: ' + rh
    output += ', \x1FFeels Like\x1F: ' + add_degree(feelslike)
    output += ', \x1FPressure\x1F: ' + '[%s] %sin (%smb)' % (
        pressure_trend, pressure_in, pressure_mb)
    output += ', \x1FWind\x1F: ' + 'From the %s at %s mph (%s kmh)' % (
        wind_dir, wind_mph, wind_kph)
    output += ', \x1FLocation\x1F: ' + (
        location).encode('utf-8').decode('utf-8')
    output += ', ' + time_updated

    output += ', (Powered by wunderground.com)'

    jenni.say(output)
Code example #45
0
def getquote(code, input):
    page = web.get(quoteuri)
    paragraphs = r_paragraph.findall(page)
    line = re.sub(r'<[^>]*?>', '', unicode(paragraphs[0]))
    code.say(code.bold(line.lower().capitalize() + "."))
Code example #46
0
def catfacts_ajax():
    uri = 'http://facts.cat/getfact'
    bytes = web.get(uri)
    return web.json(bytes)
Code example #47
0
def now_playing(phenny, input):
    nick = input.nick
    user = ""
    arg = input.group(2)
    if not arg or len(arg.strip()) == 0:
        user = resolve_username(nick)  # use the sender
        if not user:  #nick didnt resolve
            user = nick
    else:  # use the argument
        user = resolve_username(arg.strip())
        if not user:  # user didnt resolve
            user = arg
    user = user.strip()
    try:
        req = web.get("%smethod=user.getrecenttracks&user=%s" %
                      (APIURL, web.quote(user)))
    except web.HTTPError as e:
        if e.response.status_code == 400:
            phenny.say(
                "%s doesn't exist on last.fm, perhaps they need to set user" %
                (user))
            return
        else:
            phenny.say("uhoh. try again later, mmkay?")
            return
    root = etree.fromstring(req.encode('utf-8'))
    recenttracks = list(root)
    if len(recenttracks) == 0:
        phenny.say(
            "%s hasn't played anything recently. this isn't you? try lastfm-set"
            % (user))
        return
    tracks = list(recenttracks[0])
    #print(etree.tostring(recenttracks[0]))
    if len(tracks) == 0:
        phenny.say(
            "%s hasn't played anything recently. this isn't you? try lastfm-set"
            % (user))
        return
    first = tracks[0]
    now = True if first.get("nowplaying") == "true" else False
    tags = {}
    for e in first.getiterator():
        tags[e.tag] = e

    track = tags['name'].text.strip()

    artist = tags['artist'].text.strip()

    album = "unknown"
    if tags['album'].text:
        album = tags['album'].text

    date = None
    stamp = None
    if not now:
        date = tags['date'].get("uts")
        stamp = int(date)

    if now:
        present = get_verb(nick)[1]
        phenny.say("%s %s \"%s\" by %s on %s" %
                   (user.strip(), present.strip(), track, artist, album))
        return
    else:
        past = get_verb(nick)[0]
        phenny.say("%s %s \"%s\" by %s on %s %s" %
                   (user.strip(), past.strip(), track, artist, album,
                    pretty_date(stamp)))
Code example #48
0
def _fetch():
    wf().logger.info("retrieving releases for %r ...", repo)
    r = web.get(url)
    r.raise_for_status()
    return r.content
Code example #49
0
File: movies.py Project: wolfy1339/Kenni
def movie(kenni, input):
    '''.omdb movie/show title -- displays information about a production'''
    if not hasattr(kenni.config, 'omdb_apikey'):
        return kenni.say("Please sign up for an OMDb API key")
    API_BASE_URL = "http://www.omdbapi.com/?apikey=" +  kenni.config.omdb_apikey + "&"
    if not input.group(2):
        return kenni.say('Please enter a movie or TV show title. '
                         'Year is optional.')

    word = input.group(2).rstrip()
    matchObj = re.match(r'([\w\s]*)\s?,\s?(\d{4})', word, re.M | re.I)

    if matchObj:
        title = matchObj.group(1)
        year = matchObj.group(2)
        title = prep_title(title)
        uri = API_BASE_URL + 't=%s&y=%s&plot=short&r=json' % (title, year)
    else:
        title = word
        title = prep_title(title)
        uri = API_BASE_URL + 't=%s&plot=short&r=json' % (title)

    try:
        page = web.get(uri)
    except:
        return kenni.say('[OMDB] Connection to API did not succeed.')

    try:
        data = json.loads(page)
    except:
        return kenni.say("[OMDB] Couldn't make sense of information from API")

    message = '[OMDB] '

    if data['Response'] == 'False':
        if 'Error' in data:
            message += data['Error']
        else:
            message += 'Got an error from omdbapi'
    else:
        pre_plot_output = 'Title: {0} | Released: {1} | Rated: {2} '
        pre_plot_output += '| Rating: {3} | Metascore: {4} | Genre: {5} '
        pre_plot_output += '| Runtime: {6} | Plot: '
        genre = data['Genre']
        runtime = data['Runtime']
        pre_plot = pre_plot_output.format(data['Title'], data['Released'],
                                          data['Rated'], data['imdbRating'],
                                          data['Metascore'], genre,
                                          runtime)

        after_plot_output = ' | IMDB Link: http://imdb.com/title/{0}'
        after_plot = after_plot_output.format(data['imdbID'])
        truncation = '[...]'

        ## 510 - (16 + 8 + 63)
        ## max_chars (minus \r\n) - (max_nick_length + max_ident_length
        ##     + max_vhost_lenth_on_freenode)
        max_len_of_plot = 423 - (len(pre_plot) + len(after_plot) + len(truncation))

        new_plot = data['Plot']
        if len(data['Plot']) > max_len_of_plot:
            new_plot = data['Plot'][:max_len_of_plot] + truncation

        message = pre_plot + new_plot + after_plot

    kenni.say(message)
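
The 423 in the plot-truncation arithmetic above is an IRC line budget: a raw line is capped at 512 bytes including the trailing CRLF, and the server prepends the sender's nick!ident@host prefix to relayed messages. A sketch of the same budget as a helper (hypothetical name):

def plot_budget(pre_plot, after_plot, truncation='[...]'):
    # 510 usable bytes (512 minus CRLF) minus worst-case prefix parts:
    # 16 (nick) + 8 (ident) + 63 (max vhost length on freenode) = 423
    max_len = 510 - (16 + 8 + 63)
    return max_len - (len(pre_plot) + len(after_plot) + len(truncation))
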
Code example #50
0
File: update.py Project: trunda/my-mac
def retrieve_releases():
    log.info('Retrieving releases for `{}` ...'.format(github_slug))
    return web.get(api_url).json()
Code example #51
0
File: weather.py Project: endenizen/torp
def f_weather(self, origin, match, args): 
   """.weather <ICAO> - Show the weather at airport with the code <ICAO>."""
   if origin.sender == '#talis': 
      if args[0].startswith('.weather '): return

   icao_code = match.group(2)
   if not icao_code: 
      return self.msg(origin.sender, 'Try .weather London, for example?')

   icao_code = code(self, icao_code)

   if not icao_code: 
      self.msg(origin.sender, 'No ICAO code found, sorry')
      return

   uri = 'http://weather.noaa.gov/pub/data/observations/metar/stations/%s.TXT'
   try: bytes = web.get(uri % icao_code)
   except AttributeError: 
      raise GrumbleError('OH CRAP NOAA HAS GONE DOWN THE WEB IS BROKEN')
   if 'Not Found' in bytes: 
      self.msg(origin.sender, icao_code+': no such ICAO code, or no NOAA data')
      return

   metar = bytes.splitlines().pop()
   metar = metar.split(' ')

   if len(metar[0]) == 4: 
      metar = metar[1:]

   if metar[0].endswith('Z'): 
      time = metar[0]
      metar = metar[1:]
   else: time = None

   if metar[0] == 'AUTO': 
      metar = metar[1:]
   if metar[0] == 'VCU': 
      self.msg(origin.sender, icao_code + ': no data provided')
      return

   if metar[0].endswith('KT'): 
      wind = metar[0]
      metar = metar[1:]
   else: wind = None

   if ('V' in metar[0]) and (metar[0] != 'CAVOK'): 
      vari = metar[0]
      metar = metar[1:]
   else: vari = None

   if ((len(metar[0]) == 4) or 
       metar[0].endswith('SM')): 
      visibility = metar[0]
      metar = metar[1:]
   else: visibility = None

   while metar[0].startswith('R') and (metar[0].endswith('L') 
                                    or 'L/' in metar[0]): 
      metar = metar[1:]

   if len(metar[0]) == 6 and (metar[0].endswith('N') or 
                              metar[0].endswith('E') or 
                              metar[0].endswith('S') or 
                              metar[0].endswith('W')): 
      metar = metar[1:] # 7000SE?

   cond = []
   while (((len(metar[0]) < 5) or 
          metar[0].startswith('+') or 
          metar[0].startswith('-')) and (not (metar[0].startswith('VV') or
          metar[0].startswith('SKC') or metar[0].startswith('CLR') or 
          metar[0].startswith('FEW') or metar[0].startswith('SCT') or 
          metar[0].startswith('BKN') or metar[0].startswith('OVC')))): 
      cond.append(metar[0])
      metar = metar[1:]

   while '/P' in metar[0]: 
      metar = metar[1:]

   if not metar: 
      self.msg(origin.sender, icao_code + ': no data provided')
      return

   cover = []
   while (metar[0].startswith('VV') or metar[0].startswith('SKC') or
          metar[0].startswith('CLR') or metar[0].startswith('FEW') or
          metar[0].startswith('SCT') or metar[0].startswith('BKN') or
          metar[0].startswith('OVC')): 
      cover.append(metar[0])
      metar = metar[1:]
      if not metar: 
         self.msg(origin.sender, icao_code + ': no data provided')
         return

   if metar[0] == 'CAVOK': 
      cover.append('CLR')
      metar = metar[1:]

   if metar[0] == 'PRFG': 
      cover.append('CLR') # @@?
      metar = metar[1:]

   if metar[0] == 'NSC': 
      cover.append('CLR')
      metar = metar[1:]

   if ('/' in metar[0]) or (len(metar[0]) == 5 and metar[0][2] == '.'): 
      temp = metar[0]
      metar = metar[1:]
   else: temp = None

   if metar[0].startswith('QFE'): 
      metar = metar[1:]

   if metar[0].startswith('Q') or metar[0].startswith('A'): 
      pressure = metar[0]
      metar = metar[1:]
   else: pressure = None

   if time: 
      hour = time[2:4]
      minute = time[4:6]
      time = local(icao_code, hour, minute)
   else: time = '(time unknown)'

   if wind: 
      speed = int(wind[3:5])
      if speed < 1: 
         description = 'Calm'
      elif speed < 4: 
         description = 'Light air'
      elif speed < 7: 
         description = 'Light breeze'
      elif speed < 11: 
         description = 'Gentle breeze'
      elif speed < 16: 
         description = 'Moderate breeze'
      elif speed < 22: 
         description = 'Fresh breeze'
      elif speed < 28: 
         description = 'Strong breeze'
      elif speed < 34: 
         description = 'Near gale'
      elif speed < 41: 
         description = 'Gale'
      elif speed < 48: 
         description = 'Strong gale'
      elif speed < 56: 
         description = 'Storm'
      elif speed < 64: 
         description = 'Violent storm'
      else: description = 'Hurricane'

      degrees = wind[0:3]
      if degrees == 'VRB':
         degrees = u'\u21BB'.encode('utf-8')
      else:
         ## the raw METAR bearing is a string; convert before comparing
         degrees = float(degrees)
         if (degrees <= 22.5) or (degrees > 337.5):
            degrees = u'\u2191'.encode('utf-8')
         elif degrees <= 67.5:
            degrees = u'\u2197'.encode('utf-8')
         elif degrees <= 112.5:
            degrees = u'\u2192'.encode('utf-8')
         elif degrees <= 157.5:
            degrees = u'\u2198'.encode('utf-8')
         elif degrees <= 202.5:
            degrees = u'\u2193'.encode('utf-8')
         elif degrees <= 247.5:
            degrees = u'\u2199'.encode('utf-8')
         elif degrees <= 292.5:
            degrees = u'\u2190'.encode('utf-8')
         else:
            degrees = u'\u2196'.encode('utf-8')

      if not icao_code.startswith('EN') and not icao_code.startswith('ED'): 
         wind = '%s %skt (%s)' % (description, speed, degrees)
      elif icao_code.startswith('ED'): 
         kmh = int(round(speed * 1.852, 0))
         wind = '%s %skm/h (%skt) (%s)' % (description, kmh, speed, degrees)
      elif icao_code.startswith('EN'): 
         ms = int(round(speed * 0.514444444, 0))
         wind = '%s %sm/s (%skt) (%s)' % (description, ms, speed, degrees)
   else: wind = '(wind unknown)'

   if visibility: 
      visibility = visibility + 'm'
   else: visibility = '(visibility unknown)'

   if cover: 
      level = None
      for c in cover: 
         if c.startswith('OVC') or c.startswith('VV'): 
            if (level is None) or (level < 8): 
               level = 8
         elif c.startswith('BKN'): 
            if (level is None) or (level < 5): 
               level = 5
         elif c.startswith('SCT'): 
            if (level is None) or (level < 3): 
               level = 3
         elif c.startswith('FEW'): 
            if (level is None) or (level < 1): 
               level = 1
         elif c.startswith('SKC') or c.startswith('CLR'): 
            if level is None: 
               level = 0

      if level == 8: 
         cover = u'Overcast \u2601'.encode('utf-8')
      elif level == 5: 
         cover = 'Cloudy'
      elif level == 3: 
         cover = 'Scattered'
      elif (level == 1) or (level == 0): 
         cover = u'Clear \u263C'.encode('utf-8')
      else: cover = 'Cover Unknown'
   else: cover = 'Cover Unknown'

   if temp: 
      if '/' in temp: 
         temp = temp.split('/')[0]
      else: temp = temp.split('.')[0]
      if temp.startswith('M'): 
         temp = '-' + temp[1:]
      try: temp = int(temp)
      except ValueError: temp = '?'
   else: temp = '?'

   if pressure: 
      if pressure.startswith('Q'): 
         pressure = pressure.lstrip('Q')
         if pressure != 'NIL': 
            pressure = str(int(pressure)) + 'mb'
         else: pressure = '?mb'
      elif pressure.startswith('A'): 
         pressure = pressure.lstrip('A')
         if pressure != 'NIL': 
            inches = pressure[:2] + '.' + pressure[2:]
            mb = int(float(inches) * 33.7685)
            pressure = '%sin (%smb)' % (inches, mb)
         else: pressure = '?mb'

         if isinstance(temp, int): 
            f = round((temp * 1.8) + 32, 2)
            temp = u'%s\u2109 (%s\u2103)'.encode('utf-8') % (f, temp)
   else: pressure = '?mb'
   if isinstance(temp, int): 
      temp = u'%s\u2103'.encode('utf-8') % temp

   if cond: 
      conds = cond
      cond = ''

      intensities = {
         '-': 'Light', 
         '+': 'Heavy'
      }

      descriptors = {
         'MI': 'Shallow', 
         'PR': 'Partial', 
         'BC': 'Patches', 
         'DR': 'Drifting', 
         'BL': 'Blowing', 
         'SH': 'Showers of', 
         'TS': 'Thundery', 
         'FZ': 'Freezing', 
         'VC': 'In the vicinity:'
      }

      phenomena = {
         'DZ': 'Drizzle', 
         'RA': 'Rain', 
         'SN': 'Snow', 
         'SG': 'Snow Grains', 
         'IC': 'Ice Crystals', 
         'PL': 'Ice Pellets', 
         'GR': 'Hail', 
         'GS': 'Small Hail', 
         'UP': 'Unknown Precipitation', 
         'BR': 'Mist', 
         'FG': 'Fog', 
         'FU': 'Smoke', 
         'VA': 'Volcanic Ash', 
         'DU': 'Dust', 
         'SA': 'Sand', 
         'HZ': 'Haze', 
         'PY': 'Spray', 
         'PO': 'Whirls', 
         'SQ': 'Squalls', 
         'FC': 'Tornado', 
         'SS': 'Sandstorm', 
         'DS': 'Duststorm', 
         # ? Cf. http://swhack.com/logs/2007-10-05#T07-58-56
         'TS': 'Thunderstorm', 
         'SH': 'Showers'
      }

      for c in conds: 
         if c.endswith('//'): 
            if cond: cond += ', '
            cond += 'Some Precipitation'
         elif len(c) == 5: 
            intensity = intensities[c[0]]
            descriptor = descriptors[c[1:3]]
            phenomenon = phenomena.get(c[3:], c[3:])
            if cond: cond += ', '
            cond += intensity + ' ' + descriptor + ' ' + phenomenon
         elif len(c) == 4: 
            descriptor = descriptors.get(c[:2], c[:2])
            phenomenon = phenomena.get(c[2:], c[2:])
            if cond: cond += ', '
            cond += descriptor + ' ' + phenomenon
         elif len(c) == 3: 
            intensity = intensities.get(c[0], c[0])
            phenomenon = phenomena.get(c[1:], c[1:])
            if cond: cond += ', '
            cond += intensity + ' ' + phenomenon
         elif len(c) == 2: 
            phenomenon = phenomena.get(c, c)
            if cond: cond += ', '
            cond += phenomenon

   # if not cond: 
   #    format = u'%s at %s: %s, %s, %s, %s'
   #    args = (icao, time, cover, temp, pressure, wind)
   # else: 
   #    format = u'%s at %s: %s, %s, %s, %s, %s'
   #    args = (icao, time, cover, temp, pressure, cond, wind)

   if not cond: 
      format = u'%s, %s, %s, %s - %s %s'
      args = (cover, temp, pressure, wind, str(icao_code), time)
   else: 
      format = u'%s, %s, %s, %s, %s - %s, %s'
      args = (cover, temp, pressure, cond, wind, str(icao_code), time)

   self.msg(origin.sender, format.encode('utf-8') % args)
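
The bearing-to-arrow ladder in the wind section above maps each 45-degree sector to one of eight arrows. The same mapping fits in a lookup with integer arithmetic; a sketch (hypothetical names):

ARROWS = [u'\u2191', u'\u2197', u'\u2192', u'\u2198',
          u'\u2193', u'\u2199', u'\u2190', u'\u2196']

def wind_arrow(bearing):
    # shift by half a sector (22.5 degrees) so each arrow covers
    # +/- 22.5 degrees around its compass point, then pick a sector
    return ARROWS[int((bearing + 22.5) % 360 // 45)]
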
Code example #52
0
More info:
 * jenni: https://github.com/myano/jenni/
 * Phenny: http://inamidst.com/phenny/
"""

import BeautifulSoup
import re
import urllib2
import web

BS = BeautifulSoup.BeautifulSoup

uri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'
r_tag = re.compile(r'<(?!!)[^>]+>')
page = web.get(uri)
soup = BS(page)


def gettld(jenni, input):
    '''.tld .sh -- displays information about a given top level domain.'''
    text = input.group(2)
    if not text:
        jenni.reply("You didn't provide any input.")
        return
    text = text.split()[0]
    if text and text.startswith('.'):
        text = text[1:]
    text = text.encode('utf-8')

    tlds = soup.findAll('tr', {'valign': 'top'})
Code example #53
0
File: search.py Project: ZetaRift/CompuBot
def forecast_search(query, phenny):
    if hasattr(phenny.config, 'wunderground_api_key'):
        query = query.replace('!', '')
        query = query.replace(' ', '_')
        query = web.quote(query)
        uri = 'https://api.wunderground.com/api/' + phenny.config.wunderground_api_key + '/conditions/forecast/alerts/q/' + query + '.json'
        rec_bytes = web.get(uri)
        jsonstring = json.loads(rec_bytes)
        try:
            jsonstring['response']['error']['type']
            wferror = 1
        except KeyError:
            wferror = 0

        if wferror == 1:
            wferrortype = jsonstring['response']['error']['type']
            wferrordesc = jsonstring['response']['error']['description']
            wferrorfull = 'Error Code: ' + wferrortype + ' - ' + wferrordesc
            return wferrorfull, None
        else:
            try:
                wfcity = jsonstring['current_observation']['display_location'][
                    'full']

                degree_sign = u'\N{DEGREE SIGN}'

                ## the API nests every day the same way, so build the four
                ## day sentences in a loop instead of 24 copied lookups
                sentences = []
                forecastdays = jsonstring['forecast']['simpleforecast']['forecastday']
                for i, day in enumerate(forecastdays[:4]):
                    weekday = day['date']['weekday']
                    cond = day['conditions']
                    high_f = str(day['high']['fahrenheit'])
                    high_c = str(day['high']['celsius'])
                    low_f = str(day['low']['fahrenheit'])
                    low_c = str(day['low']['celsius'])
                    if i == 0:
                        lead = 'The forecast for %s in %s is %s' % (
                            weekday, wfcity, cond)
                    else:
                        lead = 'On %s it will be %s' % (weekday, cond)
                    sentences.append(
                        '%s with a high of %s%sF (%s%sC) and a low of %s%sF (%s%sC).'
                        % (lead, high_f, degree_sign, high_c, degree_sign,
                           low_f, degree_sign, low_c, degree_sign))

                return ' '.join(sentences), jsonstring
            except KeyError:
                return None, None
    else:
        return 'Sorry but you need to set your wunderground_api_key in the config file.', None
Code example #54
0
File: update.py Project: sqsgalaxys/DepQuery
def retrieve_releases():
    wf().logger.info('retrieving releases: %s', github_slug)
    return web.get(api_url).json()
Code example #55
0
File: weather.py Project: NeoMahler/jenni
def forecast(jenni, input):
    if not hasattr(jenni.config, 'forecastio_apikey'):
        return jenni.say('Please sign up for a forecast.io API key at https://forecast.io/ or try .wx-noaa or .weather-noaa')

    txt = input.group(2)
    if not txt:
        return jenni.say('Please provide a location.')

    name, county, region, countryName, lat, lng = location(txt)

    url = 'https://api.forecast.io/forecast/%s/%s,%s'

    url = url % (jenni.config.forecastio_apikey, urllib.quote(lat), urllib.quote(lng))

    ## do some error checking
    try:
        ## if this fails, we got bigger problems
        page = web.get(url)
    except:
        return jenni.say('Could not access https://api.forecast.io/')

    ## we want some tasty JSON
    try:
        data = json.loads(page)
    except:
        return jenni.say('The server did not return anything that was readable as JSON.')

    if 'daily' not in data or 'data' not in data['daily']:
        return jenni.say('Cannot find usable info in information returned by the server.')

    ## here we go...

    days = data['daily']['data']

    new_name = uc.decode(name)
    new_region = uc.decode(region)
    new_countryName = uc.decode(countryName)
    new_county = uc.decode(county)

    output = u'[%s, %s, %s] ' % (new_name, new_region, new_countryName)

    if 'alerts' in data:
        ## TODO: modularize the colourful parsing of alerts from nws.py so this can be colourized
        for alert in data['alerts']:
            jenni.say(alert['title'] + ' Expires at: ' + str(datetime.datetime.fromtimestamp(int(alert['expires']))))

    k = 1
    units = data['flags']['units']

    second_output = u'[%s, %s, %s] ' % (new_name, new_region, new_countryName)

    for day in days:
        ## give me floats with only one significant digit
        form = u'%.1f'

        hi = form % (day['temperatureMax'])
        low = form % (day['temperatureMin'])
        dew = form % (day['dewPoint'])

        ## convert measurements in F to C
        hiC = form % ((float(day['temperatureMax']) - 32) * (5.0 / 9.0))
        lowC = form % ((float(day['temperatureMin']) - 32) * (5.0 / 9.0))
        dewC = form % ((float(day['dewPoint']) - 32) * (5.0 / 9.0))

        ## conversion factor taken from https://is.gd/3dNrbW
        windspeedC = form % (float(day['windSpeed']) * 1.609344)
        windspeed = form % (day['windSpeed'])
        summary = day['summary']
        dotw_day = datetime.datetime.fromtimestamp(int(day['sunriseTime'])).weekday()
        dotw_day_pretty = u'\x0310\x02\x1F%s\x1F\x02\x03' % (dotw[dotw_day])

        line = u'%s: \x02\x0304%sF (%sC)\x03\x02 / \x02\x0312%sF (%sC)\x03\x02, \x1FDew\x1F: %sF (%sC), \x1FWind\x1F: %smph (%skmh), %s | '

        ## remove extra whitespace
        dotw_day_pretty = (dotw_day_pretty).strip()
        hi = (hi).strip()
        low = (low).strip()
        dew = (dew).strip()
        windspeed = (windspeed).strip()
        summary = (summary).strip()

        ## toggle unicode nonsense
        dotw_day_pretty = uc.encode(dotw_day_pretty)
        hi = uc.encode(hi)
        low = uc.encode(low)
        dew = uc.encode(dew)
        windspeed = uc.encode(windspeed)
        summary = uc.encode(summary)

        ## more unicode nonsense
        dotw_day_pretty = uc.decode(dotw_day_pretty).upper()
        hi = uc.decode(hi)
        low = uc.decode(low)
        dew = uc.decode(dew)
        windspeed = uc.decode(windspeed)
        summary = uc.decode(summary)

        ## only show 'today' and the next 3-days.
        ## but only show 2 days on each line
        if k <= 2:
            output += line % (dotw_day_pretty, hi, hiC, low, lowC, dew, dewC, windspeed, windspeedC, summary)
        elif k <= 4:
            second_output += line % (dotw_day_pretty, hi, hiC, low, lowC, dew, dewC, windspeed, windspeedC, summary)
        else:
            break

        k += 1

    ## chomp off the ending |
    if output.endswith(' | '):
        output = output[:-3]

    ## say the first part
    jenni.say(output)

    if second_output.endswith(' | '):
        second_output = second_output[:-3]

    ## required according to ToS by forecast.io
    second_output += ' (Powered by Forecast, forecast.io)'
    jenni.say(second_output)
Code example #56
0
def uncyclopedia(term, last=False): 
    global wikiuri
    if '%' not in term:
        q = urllib.parse.quote(term)
        bytes = web.get(wikiuri % q)
    else:
        bytes = web.get(wikiuri % term)
    bytes = r_tr.sub('', bytes)

    if not last: 
        r = r_redirect.search(bytes[:4096])
        if r: 
            term = urllib.parse.unquote(r.group(1))
            return uncyclopedia(term, last=True)

    paragraphs = r_paragraph.findall(bytes)

    if not paragraphs: 
        if not last: 
            term = search(term)
            return uncyclopedia(term, last=True)
        return None

    # Pre-process
    paragraphs = [para for para in paragraphs 
                      if (para and 'technical limitations' not in para 
                                  and 'window.showTocToggle' not in para 
                                  and 'Deletion_policy' not in para 
                                  and 'Template:AfD_footer' not in para 
                                  and not (para.startswith('<p><i>') and 
                                              para.endswith('</i></p>'))
                                  and not 'disambiguation)"' in para) 
                                  and not '(images and media)' in para
                                  and not 'This article contains a' in para 
                                  and not 'id="coordinates"' in para
                                  and not 'class="thumb' in para
                                  and not 'There is currently no text in this page.' in para]
                                  # and not 'style="display:none"' in para]

    for i, para in enumerate(paragraphs): 
        para = para.replace('<sup>', '|')
        para = para.replace('</sup>', '|')
        paragraphs[i] = text(para).strip()

    # Post-process
    paragraphs = [para for para in paragraphs if 
                      (para and not (para.endswith(':') and len(para) < 150))]

    para = text(paragraphs[0])
    m = r_sentence.match(para)

    if not m: 
        if not last: 
            term = search(term)
            return uncyclopedia(term, last=True)
        return None
    sentence = m.group(0)

    maxlength = 275
    if len(sentence) > maxlength: 
        sentence = sentence[:maxlength]
        words = sentence[:-5].split(' ')
        words.pop()
        sentence = ' '.join(words) + ' [...]'

    if (('using the Article Wizard if you wish' in sentence)
     or ('or add a request for it' in sentence)): 
        if not last: 
            term = search(term)
            return uncyclopedia(term, last=True)
        return None

    sentence = '"' + sentence.replace('"', "'") + '"'
    return sentence + ' - ' + (wikiuri % term)
Code example #57
0
def c(jenni, input):
    '''.c -- Google calculator.'''

    ## let's not bother if someone doesn't give us input
    if not input.group(2):
        return jenni.reply('Nothing to calculate.')

    ## handle some unicode conversions
    q = input.group(2).encode('utf-8')
    q = q.replace('\xcf\x95', 'phi')  # utf-8 U+03D5
    q = q.replace('\xcf\x80', 'pi')  # utf-8 U+03C0

    ## Attempt #1 (Google)
    uri = 'https://www.google.com/search?gbv=1&q='
    uri += web.urllib.quote(q)

    ## To the webs!
    try:
        page = web.get(uri)
    except:
        ## if we can't access Google for calculating
        ## let us move on to Attempt #2
        page = None
        answer = None

    if page:
        ## if we get a response from Google
        ## let us parse out an equation from Google Search results
        answer = c_answer.findall(page)

    if answer:
        ## if the regex finding found a match we want the first result
        answer = answer[0]
        #answer = answer.replace(u'\xc2\xa0', ',')
        answer = answer.decode('unicode-escape')
        answer = ''.join(chr(ord(c)) for c in answer)
        answer = uc.decode(answer)
        answer = answer.replace('<sup>', '^(')
        answer = answer.replace('</sup>', ')')
        answer = web.decode(answer)
        answer = answer.strip()
        jenni.say(answer)
    else:
        #### Attempt #2 (DuckDuckGo's API)
        ddg_uri = 'https://api.duckduckgo.com/?format=json&q='
        ddg_uri += urllib.quote(q)

        ## Try to grab page (results)
        ## If page can't be accessed, we shall fail!
        try:
            page = web.get(ddg_uri)
        except:
            page = None

        ## Try to take page source and json-ify it!
        try:
            json_response = json.loads(page)
        except:
            ## if it can't be json-ified, then we shall fail!
            json_response = None

        ## Check for 'AnswerType' (stolen from search.py)
        ## Also 'fail' to None so we can move on to Attempt #3
        ## (json_response is a dict, so test the key rather than hasattr)
        if (not json_response) or (json_response.get('AnswerType') != 'calc'):
            answer = None
        else:
            ## the JSON contains an Answer that is the result of 'calc',
            ## so strip any markup and use it
            answer = re.sub(r'\<.*?\>', '', json_response['Answer']).strip()

        if answer:
            ## If we have found answer with Attempt #2
            ## go ahead and display it
            jenni.say(answer)
        else:
            #### Attempt #3 (DuckDuckGo's HTML)
            ## This relies on BeautifulSoup; if it can't be found, don't even bother
            try:
                from BeautifulSoup import BeautifulSoup
            except:
                return jenni.say(
                    'No results. (Please install BeautifulSoup for additional checking.)'
                )

            ddg_html_page = web.get(
                'https://duckduckgo.com/html/?q=%s&kl=us-en&kp=-1' %
                (web.urllib.quote(q)))
            soup = BeautifulSoup(ddg_html_page)

            ## use BeautifulSoup to parse HTML for an answer
            zero_click = str()
            if soup('div', {'class': 'zero-click-result'}):
                zero_click = str(
                    soup('div', {'class': 'zero-click-result'})[0])

            ## remove some excess text
            output = r_tag.sub('', zero_click).strip()
            output = output.replace('\n', '').replace('\t', '')

            ## test to see if the search module has 'remove_spaces'
            ## otherwise, let us fail
            try:
                output = search.remove_spaces(output)
            except:
                output = str()

            if output:
                ## If Attempt #3 worked, display the answer
                jenni.say(output)
            else:
                ## If we made it this far, we have tried all available resources
                jenni.say('Absolutely no results!')
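
Attempt #2 above leans on DuckDuckGo's Instant Answer API; the only fields the snippet reads are 'AnswerType' and 'Answer'. A minimal standalone Python 3 sketch of just that step:

import json
import urllib.parse
import urllib.request

def ddg_calc(expression):
    uri = ('https://api.duckduckgo.com/?format=json&q='
           + urllib.parse.quote(expression))
    with urllib.request.urlopen(uri) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    # only trust results the API itself marks as calculator output
    if data.get('AnswerType') == 'calc':
        return data.get('Answer')
    return None
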
Code example #58
0
File: youtube.py Project: wolfy1339/jenni
def ytget(jenni, trigger):
    if not hasattr(jenni.config, 'google_dev_apikey'):
        return 'err'

    key = jenni.config.google_dev_apikey

    try:
        vid_id = trigger.group(2)
        uri = BASE_URL + "videos?part=snippet,contentDetails,statistics&id=" + vid_id + "&key=" + key
        bytes = web.get(uri)
        result = json.loads(bytes)
        video_entry = result['items'][0]
    except IndexError:
        jenni.say('Video not found through the YouTube API.')
        return 'err'
    except Exception:
        jenni.say('Something went wrong when accessing the YouTube API.')
        traceback.print_exc()
        return 'err'

    vid_info = {}
    vid_info['link'] = 'https://youtu.be/' + vid_id

    try:
        vid_info['title'] = video_entry['snippet']['title']
    except KeyError:
        vid_info['title'] = 'N/A'

    #get youtube channel
    try:
        vid_info['uploader'] = video_entry['snippet']['channelTitle']
    except KeyError:
        vid_info['uploader'] = 'N/A'

    #get upload time in format: yyyy-MM-ddThh:mm:ss.sssZ
    try:
        upraw = video_entry['snippet']['publishedAt']
        vid_info['uploaded'] = '%s/%s/%s, %s:%s' % (
            upraw[0:4], upraw[5:7], upraw[8:10], upraw[11:13], upraw[14:16])
    except KeyError:
        vid_info['uploaded'] = 'N/A'

    #get duration in seconds (contentDetails)
    try:
        if video_entry["snippet"]["liveBroadcastContent"] == "live":
            vid_info['length'] = 'LIVE'
        elif video_entry["snippet"]["liveBroadcastContent"] == "upcoming":
            vid_info['length'] = 'UPCOMING'
        else:
            duration = video_entry["contentDetails"]["duration"]
            # Now replace
            duration = duration.replace("P", "")
            duration = duration.replace("D", "days ")
            duration = duration.replace("T", "")
            duration = duration.replace("H", "hours ")
            duration = duration.replace("M", "mins ")
            duration = duration.replace("S", "secs")
            vid_info['length'] = duration
    except KeyError:
        vid_info['length'] = 'N/A'

    #get views (statistics)
    try:
        views = video_entry['statistics']['viewCount']
        vid_info['views'] = str('{0:20,d}'.format(int(views))).lstrip(' ')
    except KeyError:
        vid_info['views'] = 'N/A'

    #get comment count (statistics)
    try:
        comments = video_entry['statistics']['commentCount']
        vid_info['comments'] = str('{0:20,d}'.format(
            int(comments))).lstrip(' ')
    except KeyError:
        vid_info['comments'] = 'N/A'

    #get favourites (statistics)
    try:
        favourites = video_entry['statistics']['favoriteCount']
        vid_info['favourites'] = str('{0:20,d}'.format(
            int(favourites))).lstrip(' ')
    except KeyError:
        vid_info['favourites'] = 'N/A'

    #get likes & dislikes (statistics)
    try:
        likes = video_entry['statistics']['likeCount']
        vid_info['likes'] = str('{0:20,d}'.format(int(likes))).lstrip(' ')
    except KeyError:
        vid_info['likes'] = 'N/A'
    try:
        dislikes = video_entry['statistics']['dislikeCount']
        vid_info['dislikes'] = str('{0:20,d}'.format(
            int(dislikes))).lstrip(' ')
    except KeyError:
        vid_info['dislikes'] = 'N/A'

    #get video description (snippet)
    try:
        vid_info['description'] = video_entry['snippet']['description']
    except KeyError:
        vid_info['description'] = 'N/A'
    return vid_info
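
The replace() chain above rewrites ISO-8601 durations such as 'PT1H2M3S' into prose, but plain string replacement cannot tell months from minutes. A regex sketch covering the day/time designators the snippet actually handles (years and months are out of scope, as above; names are hypothetical):

import re

_DURATION = re.compile(r'^P(?:(\d+)D)?T?(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')

def pretty_duration(iso):
    m = _DURATION.match(iso)
    if not m:
        return 'N/A'
    days, hours, mins, secs = (int(g) if g else 0 for g in m.groups())
    parts = []
    if days:
        parts.append('%ddays' % days)
    if hours:
        parts.append('%dhours' % hours)
    if mins:
        parts.append('%dmins' % mins)
    if secs:
        parts.append('%dsecs' % secs)
    return ' '.join(parts) or '0secs'

# e.g. pretty_duration('PT1H2M3S') -> '1hours 2mins 3secs'
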
Code example #59
0
File: search.py Project: meinwald/jenni
def bing_search(query, lang='en-GB'):
    query = web.urllib.quote(query)
    base = 'http://www.bing.com/search?mkt=%s&q=' % lang
    bytes = web.get(base + query)
    m = r_bing.search(bytes)
    if m: return m.group(1)
Code example #60
0
File: test.py Project: 401660605/test
# coding:utf8
from selenium import webdriver
import web, time

driver = web.open_browser()
web.get('http://www.5gegg.cn')
# fill in the login form: username, password, captcha
web.input('//*[@id="app"]/div/div/div[2]/div[1]/div[2]/div[1]/input', 'wlj')
web.input('//*[@id="app"]/div/div/div[2]/div[1]/div[2]/div[2]/input', '123567')
web.input('//*[@id="app"]/div/div/div[2]/div[1]/div[2]/div[3]/input', '1')
web.click('//*[@id="app"]/div/div/div[2]/div[1]/div[2]/button')
time.sleep(1)
text = web.get_text('//*[@id="app"]/div/div/div[2]/div[1]/div[3]/span[1]')
# assert the post-login welcome banner matches the fetched text
web.assert_equals(text, '王丽娟欢迎您~')