def shuttle_times():
    """Fetch and print Shuttleboy trips from Quad to Mass Ave Garden St on 2009-12-02."""
    from urllib.request import urlopen
    from urllib.parse import quote

    # The original passed an unquoted, bare URL (a syntax error) to the
    # nonexistent ``urllib.open``.  The URL must be a string and the spaces
    # in the stop name must be percent-encoded.
    url = ("http://shuttleboy.cs50.net/api/1.2/trips?a=Quad&b="
           + quote("Mass Ave Garden St")
           + "&sdt=2009-12-02&output=json")
    file_data = urlopen(url)

    shuttle_data = json.load(file_data)

    print(shuttle_data)
Ejemplo n.º 2
0
    def activeBuses(self, route):
        """Return the integer vehicle ids currently reported on *route*."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/locations/" + route
        document = parse(open(url))
        ids = []
        for vehicle in document.getElementsByTagName("vehicle"):
            ids.append(int(vehicle.getAttribute("id")))
        return ids
Ejemplo n.º 3
0
def getAllRoutes():
    """Return the tag of every route in the GT buses route configuration."""
    from urllib import urlopen as open
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/routeConfig"
    tags = []
    for node in parse(open(url)).getElementsByTagName("route"):
        tags.append(str(node.getAttribute("tag")))
    return tags
Ejemplo n.º 4
0
    def activeBuses(self, route):
        """Return the integer vehicle ids currently reported on *route*."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        # Each <vehicle> element in the locations feed carries an "id" attribute.
        url = "https://gtbuses.herokuapp.com/locations/" + route
        predictions = parse(open(url)).getElementsByTagName("vehicle")
        return [int(p.getAttribute("id")) for p in predictions]
Ejemplo n.º 5
0
    def getBusTimes(self, route, stop):
        """Return the predicted arrival times, in seconds, for *stop* on *route*."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/multiPredictions?stops=" + route + "|" + stop
        document = parse(open(url))
        seconds = []
        for node in document.getElementsByTagName("prediction"):
            seconds.append(int(node.getAttribute("seconds")))
        return seconds
Ejemplo n.º 6
0
def getAllRoutes():
    """Return the tag of every route in the GT buses route configuration."""
    from urllib import urlopen as open
    from xml.dom.minidom import parse
    
    # Each <route> element in the config feed carries a "tag" attribute.
    url = "https://gtbuses.herokuapp.com/routeConfig"
    routes = parse(open(url)).getElementsByTagName("route")
    return [str(route.getAttribute("tag")) for route in routes]
Ejemplo n.º 7
0
def getRouteInformation(*stops):
    """Describe every (route, direction) pair that serves any of the given stop tags.

    Returns a list of dicts with keys: route, direction, routeName, stop, stopName.
    """
    from urllib import urlopen as open
    from xml.dom.minidom import parse
    url = "https://gtbuses.herokuapp.com/routeConfig"
    page = parse(open(url))

    # Human-readable titles come from <stop> elements that sit directly
    # under a <route> element (not the ones nested inside <direction>).
    stopNames = {}
    for node in page.getElementsByTagName("stop"):
        tag = node.getAttribute("tag")
        if tag in stops and node.parentNode.tagName == "route":
            stopNames[tag] = node.getAttribute("title")

    matches = []
    for routeNode in page.getElementsByTagName("route"):
        routeTag = routeNode.getAttribute("tag")
        for directionNode in routeNode.getElementsByTagName("direction"):
            directionTitle = directionNode.getAttribute("title")
            for stopNode in directionNode.getElementsByTagName("stop"):
                tag = stopNode.getAttribute("tag")
                if tag not in stops:
                    continue
                matches.append({"route": routeTag,
                                "direction": directionTitle,
                                "routeName": routeNode.getAttribute("title"),
                                "stop": tag,
                                "stopName": stopNames[tag]})

    return matches
Ejemplo n.º 8
0
    def getBusTimes(self, route, stop):
        """Return the predicted arrival times, in seconds, for *stop* on *route*."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        # The multiPredictions endpoint takes "route|stop" pairs.
        url = "https://gtbuses.herokuapp.com/multiPredictions?stops=" + route + "|" + stop
        predictions = parse(open(url)).getElementsByTagName("prediction")
        return [int(p.getAttribute("seconds")) for p in predictions]
Ejemplo n.º 9
0
def getRouteInformation(*stops):
    """Describe every (route, direction) pair that serves any of the given stop tags.

    Returns a list of dicts with keys: route, direction, routeName, stop, stopName.
    """
    from urllib import urlopen as open
    from xml.dom.minidom import parse
    url = "https://gtbuses.herokuapp.com/routeConfig"
    page = parse(open(url))

    # Titles come from <stop> elements directly under a <route> element
    # (not the ones nested inside <direction>).
    stopNames = {}
    for stopTag in page.getElementsByTagName("stop"):
        stop = stopTag.getAttribute("tag")

        if stop in stops and stopTag.parentNode.tagName == "route":
            stopNames[stop] = stopTag.getAttribute("title")

    routes = []
    for routeTag in page.getElementsByTagName("route"):
        route = routeTag.getAttribute("tag")
        for directionTag in routeTag.getElementsByTagName("direction"):
            direction = directionTag.getAttribute("title")
            for stopTag in directionTag.getElementsByTagName("stop"):
                stop = stopTag.getAttribute("tag")

                if stop in stops:
                    info = {
                        "route": route,
                        "direction": direction,
                        "routeName": routeTag.getAttribute("title"),
                        "stop": stop,
                        "stopName": stopNames[stop]
                    }
                    routes.append(info)

    return routes
def __main__():
    """Entry point: wire up the GPIO speed sensor, then run the motor/generator loop."""

    # read in any command line parameters
    parameter = sys.argv

    # initialize a connection to the computer hardware, and register the
    # rotational speed sensor
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(sensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    # bind the speed calculation function to the rotational speed sensor
    GPIO.add_event_detect(sensor, GPIO.FALLING, bouncetime=15,
                          callback=calculate_speed)

    # if functioning as a generator, continuously loop
    if generator:

        # In the original, this branch sat AFTER an unconditional
        # ``while True`` loop and was therefore unreachable; run the unit
        # at maximum output once when no consumption meter is present.
        if not power_meter:
            set_speed(generator_revolutions_per_minute)

        # if the power consumption meter is used, retrieve the current power
        # consumption and increase/decrease engagement based on it
        while power_meter:

            # Credentials were redacted ("******") in the original source,
            # which made the string literal a syntax error; supply them here.
            power_meter_user = ""      # TODO: meter username
            power_meter_password = ""  # TODO: meter password
            power_meter_address = (
                "http://" + power_meter_ip
                + "/?username=" + power_meter_user
                + "&password=" + power_meter_password
                + "&command=consumption")

            # urllib has no ``open`` (``urlopen`` intended), and .read()
            # returns text -- convert before the arithmetic below.
            power_meter_consumption = float(
                urllib.urlopen(power_meter_address).read())

            consumption_kilometers_per_hour = (
                (power_meter_consumption /
                 generator_maximum_watts) *
                generator_maximum_kilometers_per_hour)

            set_speed(consumption_kilometers_per_hour)

    # if the generator is not in use, listen for user input to control
    # motor output
    if not generator:

        while True:

            listen()
Ejemplo n.º 11
0
def getRoutes(*stopNames):
    """Return (route tag, stop tag) pairs for every requested stop tag."""
    from urllib import urlopen as open
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/routeConfig"
    pairs = []
    for node in parse(open(url)).getElementsByTagName("stop"):
        parent = node.parentNode
        if node.getAttribute("tag") in stopNames and parent.tagName == "route":
            pairs.append((parent.getAttribute("tag"), node.getAttribute("tag")))
    return pairs
Ejemplo n.º 12
0
    def getEpochKey(self, bus, route):
        """Return the feed's <lastTime> "time" attribute for *route*, or None.

        *bus* is accepted for interface compatibility but unused here.
        """
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/locations/" + route
        predictions = parse(open(url)).getElementsByTagName("lastTime")

        # The original indexed predictions[0] unconditionally, raising
        # IndexError whenever the feed returned no <lastTime> element;
        # guard on the list itself instead.
        if predictions:
            return predictions[0].getAttribute("time")
Ejemplo n.º 13
0
def getRoutes(*stopNames):
    """Return (route tag, stop tag) pairs for every requested stop tag."""
    from urllib import urlopen as open
    from xml.dom.minidom import parse

    # Only <stop> elements directly under a <route> element count.
    url = "https://gtbuses.herokuapp.com/routeConfig"
    stops = parse(open(url)).getElementsByTagName("stop")
    return [(s.parentNode.getAttribute("tag"), s.getAttribute("tag"))
            for s in stops if s.getAttribute("tag") in stopNames
            and s.parentNode.tagName == "route"]
Ejemplo n.º 14
0
    def getEpochKey(self, bus, route):
        """Return the feed's <lastTime> "time" attribute for *route*.

        NOTE(review): ``predictions[0]`` is indexed unconditionally, so an
        empty result raises IndexError before the ``!= None`` check can
        help.  *bus* is unused here.
        """
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/locations/" + route
        predictions = parse(open(url)).getElementsByTagName("lastTime")

        if predictions[0] != None:
            return predictions[0].getAttribute("time")
Ejemplo n.º 15
0
def _init():
    """Collect per-month entry values from the remote XML index feed.

    NOTE(review): ``SANTE`` is declared global but never assigned here, and
    ``values`` is rebuilt per <index> without being stored -- confirm intent
    against the rest of the module.
    """
    global SANTE
    # urllib has no ``open``; ``urlopen`` is the correct (py2) call.
    sock = urllib.urlopen(xmlsource)
    xmldoc = minidom.parse(sock).documentElement
    for index in xmldoc.getElementsByTagName("index"):
        values = []
        for year in index.getElementsByTagName("year"):
            # minidom elements have no ``.values`` attribute; query the
            # <entry> children of <year> directly.
            for entry in year.getElementsByTagName("entry"):
                values.append(entry.attributes["month"].value)
Ejemplo n.º 16
0
def _init():
    """Collect per-month entry values from the remote XML index feed.

    NOTE(review): ``urllib.open`` does not exist (``urlopen`` intended) and
    minidom elements have no ``.values`` attribute, so this cannot run as
    written.  ``SANTE`` is declared global but never assigned.
    """
    global SANTE
    sock = urllib.open(xmlsource)
    xmldoc = minidom.parse(sock).documentElement
    for index in xmldoc.getElementsByTagName("index"):
        values = []
        for year in index.getElementsByTagName("year"):
            for entry in year.values.getElementsByTagName("entry"):
                values.append(entry.attributes["month"].value)
Ejemplo n.º 17
0
    def timeToNextStop(self, nextStop, bus, route):
        """Return predicted seconds until *bus* reaches *nextStop*, or None."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/multiPredictions?stops=" + route + "|" + nextStop
        document = parse(open(url))

        wanted = int(bus)
        for node in document.getElementsByTagName("prediction"):
            if int(node.getAttribute("vehicle")) == wanted:
                return node.getAttribute("seconds")
Ejemplo n.º 18
0
    def timeToNextStop(self, nextStop, bus, route):
        """Return predicted seconds until *bus* reaches *nextStop*, or None."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/multiPredictions?stops=" + route + "|" + nextStop
        predictions = parse(open(url)).getElementsByTagName("prediction")

        # Match on the prediction's "vehicle" attribute; falls through to
        # an implicit None when no prediction is for this bus.
        for p in predictions:
            if int(p.getAttribute("vehicle")) == int(bus):
                return p.getAttribute("seconds")
Ejemplo n.º 19
0
def check_profanity(text_to_check):
    """Query the wdyl profanity service and print whether *text_to_check* is clean."""
    from urllib.request import urlopen
    from urllib.parse import quote

    # urllib has no ``open`` (urlopen intended); also percent-encode the
    # query text so spaces and punctuation survive the URL.
    connection = urlopen("http://www.wdyl.com/profanity?q=" +
                         quote(text_to_check))
    # Decode the body: urlopen returns bytes, and the "true"/"false"
    # membership tests below need str.
    output = connection.read().decode()
    connection.close()
    if "true" in output:
        print("Profanity Alert !")
    elif "false" in output:
        print("This document has no curse words!")
    else:
        print("Could not scan the document properly.")
Ejemplo n.º 20
0
    def getStopsFromRoute(self, route):
        """Map stop tag -> stop title for each prediction group on *route*."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/predictions/" + route
        nodes = parse(open(url)).getElementsByTagName("predictions")

        return {node.getAttribute("stopTag"): node.getAttribute("stopTitle")
                for node in nodes}
Ejemplo n.º 21
0
    def getStopsFromRoute(self, route):
        """Map stop tag -> stop title for each prediction group on *route*."""
        from urllib import urlopen as open
        from xml.dom.minidom import parse

        url = "https://gtbuses.herokuapp.com/predictions/" + route
        predictions = parse(open(url)).getElementsByTagName("predictions")

        result = {}

        # Later duplicates of the same stopTag overwrite earlier ones.
        for p in predictions:
            result[p.getAttribute("stopTag")] = p.getAttribute("stopTitle")
        return result
Ejemplo n.º 22
0
def customsearch(querry):
    """Run a Google Custom Search for *querry*.

    Returns a defaultdict mapping result rank -> [title, htmlFormattedUrl].
    """
    results_dict = defaultdict(list)
    encoded = urllib.quote(querry)
    # urllib has no ``open``; ``urlopen`` is the correct (py2) call.
    rawData = urllib.urlopen('https://www.googleapis.com/customsearch/v1?key=AIzaSyDthXFjwIaHm_SbjGjaqWthyVtvACbpxxY&cx=017576662512468239146:omuauf_lfve&q=' + encoded)
    data = json.load(rawData)

    # enumerate() replaces the manual len()/range() indexing.
    for i, item in enumerate(data["items"]):
        results_dict[i].append(item["title"])
        results_dict[i].append(item["htmlFormattedUrl"])

    return results_dict
        
Ejemplo n.º 23
0
 def Auth(email, password):
     """Authenticate against the DnsPod API.

     Returns True on success, False on any failure.
     """
     from sys import stderr
     if len(email) == 0:
         print('email can\'t empty')
     if len(password) == 0:
         print('password can\'t empty')
     Isloged()
     try:
         # urllib has no ``open``; ``urlopen`` is the correct call.
         authopen = urllib.urlopen('%s?email=%s&password=%s' % (DnsPodAPI['auth'], email, password))
         authopen = authopen.read()
         authjson = json.loads(authopen)
         # NOTE(review): Logininfo is a local that is never used or returned.
         Logininfo = 'mario=%s' % (authjson['mario'])
         # ``true`` was undefined in the original; Python's literal is True.
         return True
     except Exception as e:
         # The original's bare ``except`` left ``e`` unbound and used the
         # py2 ``print >> stderr`` form.
         print('Auth Error\nException:%s' % (e), file=stderr)
         return False
Ejemplo n.º 24
0
 def Auth(email, password):
     """Authenticate against the DnsPod API.

     NOTE(review): this cannot run as written -- ``urllib.open`` does not
     exist (``urlopen`` intended), ``true``/``false`` are undefined
     (True/False intended), and ``e`` is unbound inside the bare
     ``except``.  ``Logininfo`` is assigned but never used.
     """
     if len(email) == 0:
         print('email can\'t empty')
     if len(password) == 0:
         print('password can\'t empty')
     Isloged()
     try:
         authopen = urllib.open('%s?email=%s&password=%s' %
                                (DnsPodAPI['auth'], email, password))
         authopen = authopen.read()
         authjson = json.loads(authopen)
         Logininfo = 'mario=%s' % (authjson['mario'])
         return true
     except:
         print >> stderr, 'Auth Error\nException:%s' % (e)
         return false
Ejemplo n.º 25
0
def geoGrab(address, city):
    """
    Listing 10-4: Yahoo! PlaceFinder API.

    :param address: street address
    :param city: city name
    :return: decoded JSON response
    """
    apiStem = 'http://where.yahooapis.com/geocode?'
    params = {
        'flags': 'J',
        'appid': 'ppp68N8t',
        'location': '%s %s' % (address, city)
    }
    url_params = urllib.urlencode(params)
    yahooApi = apiStem + url_params
    print(yahooApi)
    # urllib has no ``open`` (urlopen intended), and json.load() takes a
    # file-like object -- pass the response itself, not response.read().
    c = urllib.urlopen(yahooApi)
    return json.load(c)
Ejemplo n.º 26
0
def getStops(route):
    """Map stop tag -> stop title for *route*; raise ValueError for unknown routes."""
    from urllib import urlopen as open
    from xml.dom.minidom import parse
    route = route.lower()

    url = "https://gtbuses.herokuapp.com/routeConfig"
    for node in parse(open(url)).getElementsByTagName("route"):
        if node.getAttribute("tag") != route:
            continue
        # First occurrence of a tag wins; duplicates are ignored.
        result = {}
        for stop in node.getElementsByTagName("stop"):
            tag = str(stop.getAttribute("tag"))
            if tag not in result:
                result[tag] = str(stop.getAttribute("title"))
        return result

    raise ValueError("Route \"" + route + "\" does not exist.")
Ejemplo n.º 27
0
def getStops(route):
    """Map stop tag -> stop title for *route*; raise ValueError for unknown routes."""
    from urllib import urlopen as open
    from xml.dom.minidom import parse
    route = route.lower()

    url = "https://gtbuses.herokuapp.com/routeConfig"
    for r in parse(open(url)).getElementsByTagName("route"):
        if r.getAttribute("tag") == route:
            stops = r.getElementsByTagName("stop")
            result = {}

            # First occurrence of a tag wins; duplicates are ignored.
            for stop in stops:
                tag = str(stop.getAttribute("tag"))
                title = str(stop.getAttribute("title"))

                if tag not in result:
                    result[tag] = title

            return result

    raise ValueError("Route \"" + route + "\" does not exist.")
Ejemplo n.º 28
0
     .format(domain))
 outputEmails = response.read()
 returnedValues = json.loads(outputEmails)
 returnedval = len(returnedValues['data']['emails'])
 i = 0
 if returnedval == 0:
     ptext = '<font size=10>No emails were found for this domain</font>'
     Story.append(Paragraph(ptext, styles["Normal"]))
     Story.append(Spacer(1, 5))
 else:
     while i < returnedval:
         temp_emails = []
         temp_emails.append(returnedValues['data']['emails'][i]['value'])
         try:
             check2hacked = urllib.open(
                 'https://haveibeenpwned.com/api/v2/breachedaccount/{0}?truncateResponse=true'
                 .format(returnedValues['data']['emails'][i]['value']))
             check2parse = json.loads(check2hacked.read())
             temp_emails.append('Hacked')
             sleep(2.0)
         except Exception as e:
             temp_emails.append('Email is safe')
             sleep(2.0)
             emails.append(temp_emails)
         i += 1
     t = Table(emails)
     t.setStyle(
         TableStyle([('ALIGN', (1, 1), (-2, -2), 'RIGHT'),
                     ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
                     ('BOX', (0, 0), (-1, -1), 0.25, colors.black)]))
     Story.append(t)
Ejemplo n.º 29
0
import urllib
from urllib.request import urlopen

# urllib has no ``open``; use urlopen.  Iterating the response yields
# bytes lines in py3, so decode before stripping.
fhand = urlopen('http://www.py4inf.com/code/romeo.txt')

for line in fhand:
    print(line.decode().strip())
Ejemplo n.º 30
0
import urllib
import json
from urllib.request import urlopen

url_testAPI = 'https://www.cryptocompare.com/api/data/coinlist/'

# urllib has no ``open``; urlopen returns the response object, which can
# then be passed to json.load() if decoding is wanted.
url_json = urlopen(url_testAPI)
Ejemplo n.º 31
0
    import code
    import os
    import sys
    try: import readline
    except ImportError: pass
    else:
        import rlcompleter
        readline.parse_and_bind("tab: complete")

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-e', '--exec', dest='exec_', default=None)
    options, args = parser.parse_args()

    try:
        from urllib import urlopen as open
    except ImportError:
        pass
    
    if options.exec_:
        for fname in args:
            if not os.access(fname, os.F_OK):
                print "Not accessible:", fname
                continue
            S = io_open(open(fname))
            exec(options.exec_)
    else:
        S = io_open(open(args[0]))
        code.interact(local=locals(), banner="")
    
Ejemplo n.º 32
0
import json
import urllib
from urllib.request import urlopen

# Sum the 'count' field of every comment in a JSON document fetched from
# a user-supplied URL.
x = 0
# ``sum`` shadowed the builtin in the original; use a distinct name.
total = 0
url = input("enter url:")
# urllib has no ``open``; urlopen is the correct call.
uh = urlopen(url)
data = uh.read().decode()
info = json.loads(str(data))
for i in info['comments']:
    x = x + 1
    total = total + i['count']
print('sum=', total)
Ejemplo n.º 33
0
import urllib
from urllib.request import urlopen


login_url = 'https://clearpass.vanderlande.com/guest/vanderlande-agreement.php?_browser=1'


# Get login page (urllib has no ``open``; urlopen intended)
login_html = urlopen(login_url)

# post accept -- credentials were redacted ("******") in the original;
# the dict literal was also missing its commas (a syntax error).
login_form_data = {
  'user': '******',
  'password': '******',
  'url': 'https://www.vanderlande.com',
  'cmd': 'authenticate',
  'Login': '******'}

# NOTE(review): the original ended with a stray editor command ("form:wq");
# the form dict is built but never actually POSTed anywhere.
Ejemplo n.º 34
0
def actionCloudflare(ip):
    """Ask the Cloudflare API to ban *ip*, using credentials from ``config``."""
    url = 'https://www.cloudflare.com/api.html?a=ban&key=%s&u=%s&tkn=%s' % (
        ip,
        config.get('cloudflare', 'user'),
        config.get('cloudflare', 'token'))
    # urllib has no ``open``; ``urlopen`` is the correct call.
    urllib.urlopen(url).read()
Ejemplo n.º 35
0
def message(id, msg):
    """Send (id, msg) to the local service as query parameters and log *msg*."""
    import urllib.parse
    import urllib.request
    # py3: urlencode lives in urllib.parse and urlopen in urllib.request
    # (``urllib.open`` has never existed).
    params = urllib.parse.urlencode({'id': id, 'msg': msg})
    req = urllib.request.urlopen("http://localhost:8080?%s" % params)
    print(req.read())
    log_data.append(msg)
Ejemplo n.º 36
0
import os
import urllib
from urllib.request import urlopen

clipboard = "https://sites.google.com/site/cyke642/clipboard.py?attredirects=0"
py2exe = "https://downloads.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.6.exe?r=https%3A%2F%2Fsourceforge.net%2Fprojects%2Fpy2exe%2Ffiles%2F&amp;ts=1356117096&amp;use_mirror=superb-dca3"

# The original opened the files without a write mode (so .write() failed),
# called the nonexistent urllib.open and os.getwd, and used py2 print
# statements; all fixed below.
f1 = open("py2exe", "wb")
print("Downloading py2exe installer...")
f1.write(urlopen(py2exe).read())
print("Py2exe download finished.")
f1.close()
f2 = open("clipboard", "wb")
print("Downloading clipboard")
f2.write(urlopen(clipboard).read())
print("Clipboard download finished.")
f2.close()
print("Please Install py2exe")
# os.getcwd() is the correct name, and the path needs a separator.
os.system(os.path.join(os.getcwd(), "py2exe"))
input("Setup finished,Press enter to exit.")
Ejemplo n.º 37
0
def actionCloudflare(ip):
    """Ask the Cloudflare API to ban *ip*, using credentials from ``config``.

    NOTE(review): ``urllib.open`` does not exist -- this raises
    AttributeError at runtime; ``urllib.urlopen`` is presumably intended.
    """
    url = 'https://www.cloudflare.com/api.html?a=ban&key=%s&u=%s&tkn=%s' % (
        ip, config.get('cloudflare', 'user'), config.get(
            'cloudflare', 'token'))
    urllib.open(url).read()
Ejemplo n.º 38
0
def getHtml(url):
	"""Return the raw body of *url*."""
	from urllib.request import urlopen
	# urllib has no ``open``; urlopen is the correct call.
	page = urlopen(url)
	html = page.read()
	return html
Ejemplo n.º 39
0
 def __send(cls, method: str, url: str, headers: dict, params: dict) -> dict:
     """Issue an HTTP *method* request to *url* and return the decoded JSON body.

     NOTE(review): the original was not valid Python (``urllib.open(req) as
     res:``) and referenced an undefined ``req``; this reconstruction builds
     the request from the declared parameters.
     """
     import json
     from urllib.parse import urlencode
     from urllib.request import Request, urlopen

     body = urlencode(params).encode() if params else None
     req = Request(url, data=body, headers=headers or {}, method=method)
     # json.loads needs the bytes of the body, not the response object.
     with urlopen(req) as res:
         return json.loads(res.read().decode())
# coding: utf-8
import json
import re
import urllib

import lxml.html

# This was an interactive-session transcript: the first parse() call passed
# an unquoted URL (syntax error) and urllib.open does not exist; only the
# working variants of those lines are kept.
f_html = urllib.urlopen("http://www.yelp.com/user_details_friends?userid=hX5D5lXijHFcm4WGtaRKng")
html_str = "".join(f_html.readlines())
f_html.close()

doc = lxml.html.fromstring(html_str)
frienddivs = doc.cssselect("div.user-passport")
len(frienddivs)

# Collect each friend's profile link, then strip it down to the bare id
# (everything after the last '=' in the href).
friendids = []
for frienddiv in frienddivs:
    friendids.append(frienddiv.cssselect("div.photoBox a")[0].get("href"))

friendids_p = []
for friend_id in friendids:
    friendids_p.append(re.sub(".+=", "", friend_id))

with open("friends.json", "w") as f:
    json.dump(friendids_p, f)
Ejemplo n.º 41
0
def getlongurl2(url):
    """Resolve *url* to its final (redirected) URL -- Python 2 urllib.

    The original name ``getlongurl2.x`` was not a valid identifier, and
    ``urllib.open`` does not exist (py2's name is ``urlopen``).
    """
    import urllib
    resp = urllib.urlopen(url)
    return resp.url

def getlongurl3(url):
    """Resolve *url* to its final (redirected) URL -- Python 3 urllib.request."""
    import urllib.request
    resp = urllib.request.urlopen(url)
    return resp.url
Ejemplo n.º 42
0
import time
import unittest
import operator
import urllib
from urllib.request import urlopen

# import webDriver
# from Selenium import webdriver
# import account

# urllib has no ``open``, and urlopen requires a scheme on the URL.
response = urlopen("http://www.baidu.com")
print(response)
def get_page(pagelink):
    """Return the raw body of *pagelink*, or "" on any failure (best effort)."""
    try:
        from urllib.request import urlopen
        # urllib has no ``open``; urlopen is the correct call.  The broad
        # except below is deliberate: any fetch failure yields "".
        return urlopen(pagelink).read()
    except:
        return ""
 def read_url(url):
     """Fetch *url* and cache its body in the module-level ``websites`` dict."""
     from urllib.request import urlopen
     # urllib has no ``open``; urlopen is the correct call.
     websites[url] = urlopen(url).read()
Ejemplo n.º 45
0
def getHtml(url):
    """Return the raw body of *url*.

    NOTE(review): ``urllib.open`` does not exist -- this raises
    AttributeError at runtime; ``urlopen`` is presumably intended.
    """
    page = urllib.open(url)
    html = page.read()
    return html
Ejemplo n.º 46
0
def getfilesize(url):
    """Return the Content-Length header value for *url*.

    NOTE(review): ``u`` is an opaque module-level name (presumably urllib
    imported as ``u``); ``u.open`` looks suspect since urllib exposes
    ``urlopen`` -- confirm against the import site.
    """
    f = u.open(url)
    # Header values are strings; no int() conversion is applied here.
    size = f.info()['Content-Length']
    return size
Ejemplo n.º 47
0
	def get_user_tumblogs(self):
		"""Return the url of each tumblelog available to the authenticated user."""
		from urllib.request import urlopen
		# Fixes: urllib has no ``open``; ElementTree.parse() takes a
		# file-like object (pass the response, not response.read());
		# stdlib ElementTree uses findall(), not xpath(); and the
		# attribute name must be the string 'url', not a bare name.
		response = urlopen(self.options.api_base + '/authenticate', self.parameters)
		tumblrelement = ElementTree.parse(response)
		tumblelogs = tumblrelement.findall('tumblelog')
		return [t.get('url') for t in tumblelogs]
def get_page(url):
    """Return the body of *url*, or "" on any failure (best effort).

    NOTE(review): ``urllib.open`` does not exist, so the bare except always
    fires and this function always returns "" as written; ``urlopen`` is
    presumably intended.
    """
    try:
        import urllib
        return urllib.open(url).read()
    except:
        return ""
Ejemplo n.º 49
0
def speech(content):
    """Send *content* to the iSpeech TTS API and print the raw response."""
    from urllib.request import urlopen
    from urllib.parse import quote
    # Fixes: urllib has no ``open``; and the original sent the literal word
    # "content" instead of interpolating the argument (now percent-encoded).
    connection = urlopen(
        "https://api.ispeech.org/api/rest?action=convert&text=" + quote(content) +
        "&voice=usenglishfemale &format=mp3 &frequency=44100 &bitrate=128&speed=1&startpadding=1 &endpadding=1 &pitch=110 &filename=myaudiofile"
    )
    output = connection.read()
    print(output)
Ejemplo n.º 50
0
def get_page(url):
    """Return the body of *url*, or "" on any failure (best effort).

    NOTE(review): ``urllib.open`` does not exist, so the bare except always
    fires and this function always returns "" as written; ``urlopen`` is
    presumably intended.
    """
    try:
        import urllib
        return urllib.open(url).read()
    except:
        return ""
Ejemplo n.º 51
0
 def read_url(url):
     """Fetch *url* and cache its body in the module-level ``websites`` dict.

     NOTE(review): ``urllib.open`` does not exist -- this raises
     AttributeError at runtime; ``urlopen`` is presumably intended.
     """
     websites[url] = urllib.open(url).read()
Ejemplo n.º 52
0
# Attach each channel's recommendation list to its row.
# (``df`` and ``recommendations`` are defined earlier in the file.)
df['recommended_channel_names'] = df.index.to_series().map(recommendations)

# Preparing the dataframe for conversion to a .csv file (to be made into a table in sqlite3 database)
df_reduced = df.drop(['display_name', 'emotes', 'emotes_parsed', 'prefix', 'broadcaster_language', 'language', 'description', 'followers', 'views', 'mature', 'game', 'status', 'created_at', 'updated_at'], axis=1)

# Putting the ten recommendations into their own columns
# (assumes every recommendation list has exactly 10 entries -- TODO confirm)
df_reduced[['rec1','rec2','rec3','rec4', 'rec5','rec6', 'rec7','rec8', 'rec9','rec10']] = pd.DataFrame([x for x in df_reduced['recommended_channel_names']])

df_reduced = df_reduced.drop(['recommendations', 'recommended_channel_names'], axis=1)

# to_csv returns None; kept for parity with the original script.
app_csv = df_reduced.to_csv('twitch.csv', index=False)

-------------------------------------------------

# Getting emote urls.  urllib has no ``open``; py2's ``urlopen`` matches
# the py2-style ``xrange`` used below.
response = urllib.urlopen('https://api.twitch.tv/kraken/chat/emoticons')
emotes = json.load(response)

img_urls = pd.DataFrame.from_dict(emotes['emoticons'])

# Map each emote's regex to the URL of its first image variant.
urls = {}
for i in xrange(len(img_urls)):
    urls[img_urls.iloc[i]['regex']] = img_urls.iloc[i]['images'][0]['url']

# Putting all the (channel name, emote) pairs into a list
imgs = []
for i in xrange(len(df)):
    for j in xrange(len(df['emotes'].iloc[i])):
        imgs.append((df['name'].iloc[i], df['emotes'].iloc[i][j]))

channel = []
Ejemplo n.º 53
0
    print a[5]
except:
    print "try语句块中出错了执行这里"
    #sys.exit(-1)
else:
    print "try语句块中没出错执行这里"
finally:
    print "无论如何都会执行这里,即使中途exit了"


import urllib

sth_url = "http://www.baidu.com"

try:
    d = urllib.open(sth_url)
except IOError:
    print "该url无法打开"
except:
    print "其他错误"
else:
    content = d.read()
    print content
    d.close()

import logging

logger = logging.getLogger() #创建一个logging对象
hdlr = logging.FileHandler('testlog.txt') #创建存放日志的文件句柄
formater = logging.Formatter('%(asctime)s %(levelname)s %(message)s') #设置日志格式
hdlr.setFormatter(formater) #文件绑定格式