def shuttle_times():
    """Fetch and print Shuttleboy trips from the Quad to Mass Ave/Garden St.

    BUG FIX: the URL was an unquoted bare token (syntax error) containing a
    literal space, and ``urllib`` has no ``open`` -- use ``urlopen`` with a
    properly encoded query string.
    """
    import json
    import urllib

    query = urllib.urlencode({
        "a": "Quad",
        "b": "Mass Ave Garden St",
        "sdt": "2009-12-02",
        "output": "json",
    })
    file_data = urllib.urlopen("http://shuttleboy.cs50.net/api/1.2/trips?" + query)
    shuttle_data = json.load(file_data)
    print(shuttle_data)
def activeBuses(self, route):
    """Return the integer vehicle ids of buses currently active on *route*.

    :param route: GT bus route tag appended to the locations endpoint
    :return: list of int vehicle ids
    """
    # Import without aliasing urlopen to ``open`` -- shadowing the builtin
    # is the only change; behaviour is identical.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/locations/" + route
    vehicles = parse(urlopen(url)).getElementsByTagName("vehicle")
    return [int(v.getAttribute("id")) for v in vehicles]
def getAllRoutes():
    """Return every route tag advertised by the GT buses routeConfig feed."""
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/routeConfig"
    routes = parse(urlopen(url)).getElementsByTagName("route")
    return [str(route.getAttribute("tag")) for route in routes]
def getBusTimes(self, route, stop):
    """Return predicted arrival times (seconds) for *stop* on *route*.

    :param route: route tag
    :param stop: stop tag
    :return: list of int seconds-until-arrival predictions
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/multiPredictions?stops=" + route + "|" + stop
    predictions = parse(urlopen(url)).getElementsByTagName("prediction")
    return [int(p.getAttribute("seconds")) for p in predictions]
def getRouteInformation(*stops):
    """Return route/direction/title info for every requested stop tag.

    :param stops: stop tags to look up
    :return: list of dicts with keys route, direction, routeName, stop, stopName
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/routeConfig"
    page = parse(urlopen(url))

    # Stop titles only appear on <stop> elements that are direct children of
    # <route>, so collect those first.
    stopNames = {}
    for stopTag in page.getElementsByTagName("stop"):
        stop = stopTag.getAttribute("tag")
        if stop in stops and stopTag.parentNode.tagName == "route":
            stopNames[stop] = stopTag.getAttribute("title")

    routes = []
    for routeTag in page.getElementsByTagName("route"):
        route = routeTag.getAttribute("tag")
        for directionTag in routeTag.getElementsByTagName("direction"):
            direction = directionTag.getAttribute("title")
            for stopTag in directionTag.getElementsByTagName("stop"):
                stop = stopTag.getAttribute("tag")
                if stop in stops:
                    routes.append({
                        "route": route,
                        "direction": direction,
                        "routeName": routeTag.getAttribute("title"),
                        "stop": stop,
                        "stopName": stopNames[stop],
                    })
    return routes
def getRouteInformation(*stops):
    """Return route/direction/title info for every requested stop tag.

    :param stops: stop tags to look up
    :return: list of dicts with keys route, direction, routeName, stop, stopName
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/routeConfig"
    page = parse(urlopen(url))

    # Titles come from <stop> elements that sit directly under <route>.
    stopNames = {}
    for stopTag in page.getElementsByTagName("stop"):
        stop = stopTag.getAttribute("tag")
        if stop in stops and stopTag.parentNode.tagName == "route":
            stopNames[stop] = stopTag.getAttribute("title")

    routes = []
    for routeTag in page.getElementsByTagName("route"):
        route = routeTag.getAttribute("tag")
        for directionTag in routeTag.getElementsByTagName("direction"):
            direction = directionTag.getAttribute("title")
            for stopTag in directionTag.getElementsByTagName("stop"):
                stop = stopTag.getAttribute("tag")
                if stop in stops:
                    info = {
                        "route": route,
                        "direction": direction,
                        "routeName": routeTag.getAttribute("title"),
                        "stop": stop,
                        "stopName": stopNames[stop],
                    }
                    routes.append(info)
    return routes
def __main__():
    """Entry point: register the rotational speed sensor and drive the motor.

    In generator mode, engagement tracks measured power consumption (or runs
    flat out without a meter); otherwise the motor is user-controlled.
    """
    # read in any command line parameters
    parameter = sys.argv

    # initialize a connection to the computer hardware, and register the
    # rotational speed sensor
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(sensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    # bind the speed-calculation callback to the rotational speed sensor
    GPIO.add_event_detect(sensor, GPIO.FALLING, bouncetime=15,
                          callback=calculate_speed)

    # if functioning as a generator, continuously loop
    if generator:
        while True:
            # When a power-consumption meter is present, scale engagement to
            # the current draw.
            if power_meter:
                # Credentials were redacted in source ("******") -- restore
                # real values before deploying.
                power_meter_address = (
                    "http://" + power_meter_ip +
                    "/?username=******&password=******&command=consumption")
                # BUG FIX: ``urllib`` has no ``open``, and ``read()`` returns
                # a string which cannot be divided -- convert to float first.
                power_meter_consumption = float(
                    urllib.urlopen(power_meter_address).read())
                consumption_kilometers_per_hour = (
                    (power_meter_consumption / generator_maximum_watts) *
                    generator_maximum_kilometers_per_hour)
                set_speed(consumption_kilometers_per_hour)
            # Without a meter, run the unit at maximum output.
            if not power_meter:
                set_speed(generator_revolutions_per_minute)

    # if the generator is not in use, listen for user input to control output
    if not generator:
        while True:
            listen()
def getRoutes(*stopNames):
    """Return (route tag, stop tag) pairs for stops whose tag is in *stopNames*.

    Only <stop> elements that are direct children of a <route> are counted.
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/routeConfig"
    stops = parse(urlopen(url)).getElementsByTagName("stop")
    return [(s.parentNode.getAttribute("tag"), s.getAttribute("tag"))
            for s in stops
            if s.getAttribute("tag") in stopNames
            and s.parentNode.tagName == "route"]
def getEpochKey(self, bus, route):
    """Return the epoch "time" attribute reported for *route*, or None.

    NOTE(review): *bus* is accepted but unused, as in the original.
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/locations/" + route
    times = parse(urlopen(url)).getElementsByTagName("lastTime")
    # BUG FIX: ``times[0] != None`` raised IndexError when the feed returned
    # no <lastTime> element; test for emptiness instead.
    if times:
        return times[0].getAttribute("time")
def _init():
    """Read month entries from the configured XML feed.

    NOTE(review): ``values`` is rebuilt per <index> and never stored -- the
    loop currently has no observable effect; confirm intent.
    """
    global SANTE
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    sock = urllib.urlopen(xmlsource)
    xmldoc = minidom.parse(sock).documentElement
    for index in xmldoc.getElementsByTagName("index"):
        values = []
        for year in index.getElementsByTagName("year"):
            # BUG FIX: minidom elements have no ``values`` attribute to
            # traverse; query the <entry> children of <year> directly.
            for entry in year.getElementsByTagName("entry"):
                values.append(entry.attributes["month"].value)
def _init():
    """Read month entries from the configured XML feed (duplicate of above).

    NOTE(review): ``values`` is rebuilt per <index> and never stored -- the
    loop currently has no observable effect; confirm intent.
    """
    global SANTE
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    sock = urllib.urlopen(xmlsource)
    xmldoc = minidom.parse(sock).documentElement
    for index in xmldoc.getElementsByTagName("index"):
        values = []
        for year in index.getElementsByTagName("year"):
            # BUG FIX: minidom elements have no ``values`` attribute;
            # query <entry> children of <year> directly.
            for entry in year.getElementsByTagName("entry"):
                values.append(entry.attributes["month"].value)
def timeToNextStop(self, nextStop, bus, route):
    """Return seconds until *bus* reaches *nextStop* on *route*, or None.

    Returns the raw "seconds" attribute string of the matching prediction.
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/multiPredictions?stops=" + route + "|" + nextStop
    for p in parse(urlopen(url)).getElementsByTagName("prediction"):
        if int(p.getAttribute("vehicle")) == int(bus):
            return p.getAttribute("seconds")
def check_profanity(text_to_check):
    """Query the WDYL profanity service and print whether profanity was found.

    :param text_to_check: document text to scan
    """
    import urllib.parse
    import urllib.request

    # BUG FIX: ``urllib`` has no ``open``; also URL-encode the text so spaces
    # and special characters do not break the query string.
    connection = urllib.request.urlopen(
        "http://www.wdyl.com/profanity?q=" + urllib.parse.quote(text_to_check))
    # BUG FIX: urlopen returns bytes on Python 3; decode before substring tests.
    output = connection.read().decode("utf-8")
    connection.close()
    if "true" in output:
        print("Profanity Alert !")
    elif "false" in output:
        print("This document has no curse words!")
    else:
        print("Could not scan the document properly.")
def getStopsFromRoute(self, route):
    """Return {stopTag: stopTitle} for every prediction block on *route*."""
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    url = "https://gtbuses.herokuapp.com/predictions/" + route
    result = {}
    for p in parse(urlopen(url)).getElementsByTagName("predictions"):
        result[p.getAttribute("stopTag")] = p.getAttribute("stopTitle")
    return result
def customsearch(querry):
    """Run a Google Custom Search and return {rank: [title, formatted url]}.

    NOTE(review): the API key is embedded in source -- move it to
    configuration and rotate it.
    """
    results_dict = defaultdict(list)
    encoded = urllib.quote(querry)
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    rawData = urllib.urlopen(
        'https://www.googleapis.com/customsearch/v1?key=AIzaSyDthXFjwIaHm_SbjGjaqWthyVtvACbpxxY&cx=017576662512468239146:omuauf_lfve&q=' + encoded)
    data = json.load(rawData)
    # Iterate the items directly instead of indexing by a precomputed length.
    for i, item in enumerate(data["items"]):
        results_dict[i].append(item["title"])
        results_dict[i].append(item["htmlFormattedUrl"])
    return results_dict
def Auth(email, password):
    """Authenticate against the DNSPod API.

    :return: True on success, False on any failure
    """
    if len(email) == 0:
        print('email can\'t empty')
    if len(password) == 0:
        print('password can\'t empty')
    Isloged()
    try:
        # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
        authopen = urllib.urlopen('%s?email=%s&password=%s'
                                  % (DnsPodAPI['auth'], email, password))
        authjson = json.loads(authopen.read())
        # NOTE(review): assigned but never used or published -- confirm
        # whether this was meant to be a global session cookie.
        Logininfo = 'mario=%s' % (authjson['mario'])
        # BUG FIX: Python booleans are True/False, not ``true``/``false``.
        return True
    except Exception as e:
        # BUG FIX: the original bare except referenced an unbound ``e``.
        print('Auth Error\nException:%s' % (e), file=stderr)
        return False
def Auth(email, password):
    """Authenticate against the DNSPod API (duplicate of the block above).

    :return: True on success, False on any failure
    """
    if len(email) == 0:
        print('email can\'t empty')
    if len(password) == 0:
        print('password can\'t empty')
    Isloged()
    try:
        # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
        authopen = urllib.urlopen('%s?email=%s&password=%s'
                                  % (DnsPodAPI['auth'], email, password))
        authjson = json.loads(authopen.read())
        # NOTE(review): assigned but never used -- confirm intent.
        Logininfo = 'mario=%s' % (authjson['mario'])
        # BUG FIX: Python booleans are True/False, not ``true``/``false``.
        return True
    except Exception as e:
        # BUG FIX: the original bare except referenced an unbound ``e``.
        print('Auth Error\nException:%s' % (e), file=stderr)
        return False
def geoGrab(address, city):
    """Listing 10-4: query the Yahoo! PlaceFinder geocoding API.

    :param address: street address
    :param city: city name
    :return: decoded JSON response
    """
    apiStem = 'http://where.yahooapis.com/geocode?'
    params = {
        'flags': 'J',          # J = return JSON
        'appid': 'ppp68N8t',
        'location': '%s %s' % (address, city),
    }
    url_params = urllib.urlencode(params)
    yahooApi = apiStem + url_params
    print(yahooApi)
    # BUG FIX: ``urllib`` has no ``open``, and json.load takes a file-like
    # object -- passing ``c.read()`` (a string) raised AttributeError.
    c = urllib.urlopen(yahooApi)
    return json.load(c)
def getStops(route):
    """Return {stop tag: title} for *route* (case-insensitive).

    :raises ValueError: if the route tag is not in the routeConfig feed
    """
    # Avoid shadowing the builtin ``open`` with the urlopen alias.
    from urllib import urlopen
    from xml.dom.minidom import parse

    route = route.lower()
    url = "https://gtbuses.herokuapp.com/routeConfig"
    for r in parse(urlopen(url)).getElementsByTagName("route"):
        if r.getAttribute("tag") == route:
            result = {}
            for stop in r.getElementsByTagName("stop"):
                tag = str(stop.getAttribute("tag"))
                title = str(stop.getAttribute("title"))
                # first occurrence wins: some stops appear in several directions
                if tag not in result:
                    result[tag] = title
            return result
    raise ValueError("Route \"" + route + "\" does not exist.")
.format(domain)) outputEmails = response.read() returnedValues = json.loads(outputEmails) returnedval = len(returnedValues['data']['emails']) i = 0 if returnedval == 0: ptext = '<font size=10>No emails were found for this domain</font>' Story.append(Paragraph(ptext, styles["Normal"])) Story.append(Spacer(1, 5)) else: while i < returnedval: temp_emails = [] temp_emails.append(returnedValues['data']['emails'][i]['value']) try: check2hacked = urllib.open( 'https://haveibeenpwned.com/api/v2/breachedaccount/{0}?truncateResponse=true' .format(returnedValues['data']['emails'][i]['value'])) check2parse = json.loads(check2hacked.read()) temp_emails.append('Hacked') sleep(2.0) except Exception as e: temp_emails.append('Email is safe') sleep(2.0) emails.append(temp_emails) i += 1 t = Table(emails) t.setStyle( TableStyle([('ALIGN', (1, 1), (-2, -2), 'RIGHT'), ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black), ('BOX', (0, 0), (-1, -1), 0.25, colors.black)])) Story.append(t)
import urllib.request

# BUG FIX: ``urllib`` has no ``open``; use urllib.request.urlopen (Python 3).
fhand = urllib.request.urlopen('http://www.py4inf.com/code/romeo.txt')
for line in fhand:
    # urlopen yields bytes; decode before stripping so print shows clean text
    print(line.decode().strip())
import urllib.request
import json

url_testAPI = 'https://www.cryptocompare.com/api/data/coinlist/'
#json.load(url_testAPI)
# BUG FIX: ``urllib`` has no ``open``; use urllib.request.urlopen.
url_json = urllib.request.urlopen(url_testAPI)
# Interactive launcher (Python 2): opens a URL/file as ``S`` and either runs
# a user-supplied -e expression over each argument or drops into a REPL.
import code
import os
import sys

# Enable tab-completion in the interactive session when readline is available.
try:
    import readline
except ImportError:
    pass
else:
    import rlcompleter
    readline.parse_and_bind("tab: complete")

from optparse import OptionParser

parser = OptionParser()
# -e EXPR: code to exec once per input file instead of an interactive session.
parser.add_option('-e', '--exec', dest='exec_', default=None)
options, args = parser.parse_args()

# On Python 2 let ``open`` also accept URLs; on Python 3 this import fails
# and the builtin ``open`` (files only) is kept.
try:
    from urllib import urlopen as open
except ImportError:
    pass

if options.exec_:
    for fname in args:
        if not os.access(fname, os.F_OK):
            print "Not accessible:", fname
            continue
        # NOTE(review): ``io_open`` is not defined in this chunk -- presumably
        # a project helper that wraps the stream; confirm before editing.
        S = io_open(open(fname))
        # NOTE(review): exec of a command-line string -- only safe for
        # trusted local use.
        exec(options.exec_)
else:
    S = io_open(open(args[0]))
    code.interact(local=locals(), banner="")
import json
import urllib.request

url = input("enter url:")
# BUG FIX: ``urllib`` has no ``open``; use urllib.request.urlopen.
uh = urllib.request.urlopen(url)
data = uh.read().decode()
info = json.loads(data)

# Total the 'count' field of every comment.  (The original also kept an
# unused element counter ``x`` and shadowed the builtin ``sum``.)
total = 0
for comment in info['comments']:
    total = total + comment['count']
print('sum=', total)
import urllib.request

login_url = 'https://clearpass.vanderlande.com/guest/vanderlande-agreement.php?_browser=1'

# Get login page
# BUG FIX: ``urllib`` has no ``open``; use urllib.request.urlopen.
login_html = urllib.request.urlopen(login_url)

# post accept
# BUG FIX: the dict literal was missing commas between its items, and a stray
# vim save-command ("form:wq") pasted after it was removed.  Credentials were
# redacted in source ("******").
login_form_data = {
    'user': '******',
    'password': '******',
    'url': 'https://www.vanderlande.com',
    'cmd': 'authenticate',
    'Login': '******',
}
def actionCloudflare(ip):
    """Ask the legacy CloudFlare client API to ban *ip*.

    Credentials come from the module-level ``config`` parser.
    """
    url = 'https://www.cloudflare.com/api.html?a=ban&key=%s&u=%s&tkn=%s' % (
        ip, config.get('cloudflare', 'user'), config.get('cloudflare', 'token'))
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    urllib.urlopen(url).read()
def message(id, msg):
    """Send *msg* to the local service at port 8080 and append it to the log.

    :param id: message id forwarded as a query parameter
    :param msg: message text (also appended to module-level ``log_data``)
    """
    import urllib
    params = urllib.urlencode({'id': id, 'msg': msg})
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    req = urllib.urlopen("http://localhost:8080?%s" % params)
    print(req.read())
    log_data.append(msg)
import urllib
import os

clipboard = "https://sites.google.com/site/cyke642/clipboard.py?attredirects=0"
py2exe = "https://downloads.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.6.exe?r=https%3A%2F%2Fsourceforge.net%2Fprojects%2Fpy2exe%2Ffiles%2F&ts=1356117096&use_mirror=superb-dca3"

# BUG FIX: both files were opened in the default read mode and then written
# to, and ``urllib`` has no ``open`` -- download via ``urlopen`` into files
# opened "wb".
f1 = open("py2exe", "wb")
print("Downloading py2exe installer...")
f1.write(urllib.urlopen(py2exe).read())
print("Py2exe download finished.")
f1.close()

f2 = open("clipboard", "wb")
print("Downloading clipboard")
f2.write(urllib.urlopen(clipboard).read())
print("Clipboard download finished.")
f2.close()

print("Please Install py2exe")
# BUG FIX: ``os.getwd`` does not exist (os.getcwd), and the bare string
# concatenation dropped the path separator.
os.system(os.path.join(os.getcwd(), "py2exe"))
raw_input("Setup finished,Press enter to exit.")
def actionCloudflare(ip):
    """Ban *ip* via the legacy CloudFlare client API (duplicate block)."""
    user = config.get('cloudflare', 'user')
    token = config.get('cloudflare', 'token')
    url = ('https://www.cloudflare.com/api.html?a=ban&key=%s&u=%s&tkn=%s'
           % (ip, user, token))
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    urllib.urlopen(url).read()
def getHtml(url):
    """Download *url* and return the raw response body."""
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    page = urllib.urlopen(url)
    html = page.read()
    return html
def __send(cls, method: str, url: str, headers: dict, params: dict) -> dict:
    """Issue an HTTP request and return the decoded JSON response body.

    BUG FIX: the original body was syntactically invalid -- a bare
    ``urllib.open(req) as res:`` without ``with``, ``req`` never constructed,
    and ``json.loads`` called on the response object rather than its bytes.
    Reconstructed with urllib.request; confirm against the original caller.
    """
    import json
    import urllib.parse
    import urllib.request

    data = urllib.parse.urlencode(params).encode() if params else None
    req = urllib.request.Request(url, data=data, headers=headers or {},
                                 method=method)
    with urllib.request.urlopen(req) as res:
        return json.loads(res.read().decode())
# coding: utf-8
# Scrape a Yelp user's friend ids and dump them to friends.json.
# NOTE(review): this was an interactive-session transcript; the mistyped
# statements (an unquoted URL literal, ``urllib.open``) have been dropped in
# favour of the corrected retries that immediately followed them.
import re
import json
import urllib

import lxml.html

f_html = urllib.urlopen("http://www.yelp.com/user_details_friends?userid=hX5D5lXijHFcm4WGtaRKng")
html_str = "".join(f_html.readlines())
f_html.close()

doc = lxml.html.fromstring(html_str)
frienddivs = doc.cssselect("div.user-passport")

# Collect each friend's profile link, then strip it down to the bare user id
# (everything after the final '=').
friendids = []
for frienddiv in frienddivs:
    friendids.append(frienddiv.cssselect("div.photoBox a")[0].get("href"))

friendids_p = []
for href in friendids:
    friendids_p.append(re.sub(".+=", "", href))

f = open("friends.json", "w")
json.dump(friendids_p, f)
f.close()
# BUG FIX: "getlongurl2.x" and "getlongurl3.x" are not valid Python
# identifiers (a def name cannot contain '.'); renamed to getlongurl2 /
# getlongurl3.

def getlongurl2(url):
    """Resolve *url* to its final destination (Python 2 urllib)."""
    import urllib
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    resp = urllib.urlopen(url)
    # NOTE(review): relies on the response exposing ``.url`` -- confirm on
    # the targeted Python 2 minor version (``geturl()`` is the documented API).
    return resp.url


def getlongurl3(url):
    """Resolve *url* to its final destination (Python 3 urllib.request)."""
    import urllib.request
    resp = urllib.request.urlopen(url)
    return resp.url
import time
import unittest
import operator
import urllib.request
# import webDriver
# from Selenium import webdriver
# import account

# BUG FIX: ``urllib`` has no ``open``, and urlopen requires an absolute URL
# including the scheme ("www.baidu.com" alone raises ValueError).
response = urllib.request.urlopen("http://www.baidu.com")
print(response)
def get_page(pagelink):
    """Return the body of *pagelink*, or "" if the fetch fails."""
    try:
        import urllib.request
        # BUG FIX: ``urllib`` has no ``open``; use urllib.request.urlopen.
        return urllib.request.urlopen(pagelink).read()
    except Exception:
        # Narrowed from a bare except; deliberately best-effort -- any
        # failure yields an empty page.
        return ""
def read_url(url):
    """Fetch *url* and cache its body in the module-level ``websites`` dict."""
    # BUG FIX: ``urllib`` has no ``open``; use ``urlopen``.
    websites[url] = urllib.urlopen(url).read()
def getfilesize(url):
    """Return the Content-Length reported by the server for *url*.

    NOTE(review): ``u`` is not defined in this chunk -- presumably a module
    alias for a urllib-style opener; confirm that it really exposes ``open``
    (stdlib urllib does not).
    """
    f = u.open(url)
    # Header value as returned by the server -- a string, not an int.
    size = f.info()['Content-Length']
    return size
def get_user_tumblogs(self):
    """Return the url attribute of each tumblelog owned by the current user."""
    # BUG FIX: ``urllib`` has no ``open``; urlopen's second positional
    # argument POSTs the stored credentials.
    response = urllib.urlopen(self.options.api_base + '/authenticate',
                              self.parameters)
    # BUG FIXES: ElementTree.parse wants a file-like object (not the string
    # from .read()), stdlib ElementTree uses findall rather than xpath, and
    # the attribute name must be the string 'url' (``url`` was unbound).
    tumblrelement = ElementTree.parse(response)
    tumblelogs = tumblrelement.findall('tumblelog')
    return [t.get('url') for t in tumblelogs]
def get_page(url):
    """Return the body of *url*, or "" on any failure (duplicate block)."""
    try:
        import urllib.request
        # BUG FIX: ``urllib`` has no ``open``; use urllib.request.urlopen.
        return urllib.request.urlopen(url).read()
    except Exception:
        # Narrowed from a bare except; any failure yields an empty page.
        return ""
def speech(content):
    """Send *content* to the iSpeech text-to-speech API and print the reply.

    BUG FIXES: ``urllib`` has no ``open``; the original URL embedded literal
    whitespace/newlines (invalid in a URL); and ``text=content`` sent the
    literal word "content" instead of the argument.
    """
    import urllib

    params = urllib.urlencode({
        'action': 'convert',
        'text': content,
        'voice': 'usenglishfemale',
        'format': 'mp3',
        'frequency': '44100',
        'bitrate': '128',
        'speed': '1',
        'startpadding': '1',
        'endpadding': '1',
        'pitch': '110',
        'filename': 'myaudiofile',
    })
    connection = urllib.urlopen("https://api.ispeech.org/api/rest?" + params)
    output = connection.read()
    print(output)
df['recommended_channel_names'] = df.index.to_series().map(recommendations)

# Prepare the dataframe for conversion to a .csv file (to be made into a
# table in the sqlite3 database).
df_reduced = df.drop(['display_name', 'emotes', 'emotes_parsed', 'prefix',
                      'broadcaster_language', 'language', 'description',
                      'followers', 'views', 'mature', 'game', 'status',
                      'created_at', 'updated_at'], axis=1)

# Put recommendations into their own columns.
df_reduced[['rec1', 'rec2', 'rec3', 'rec4', 'rec5',
            'rec6', 'rec7', 'rec8', 'rec9', 'rec10']] = pd.DataFrame(
    [x for x in df_reduced['recommended_channel_names']])
df_reduced = df_reduced.drop(['recommendations', 'recommended_channel_names'],
                             axis=1)
app_csv = df_reduced.to_csv('twitch.csv', index=False)

# -------------------------------------------------
# Getting emote urls
# BUG FIX: the section divider above was a bare run of dashes (a syntax
# error), and ``urllib`` has no ``open`` -- use ``urlopen``.
response = urllib.urlopen('https://api.twitch.tv/kraken/chat/emoticons')
emotes = json.load(response)
img_urls = pd.DataFrame.from_dict(emotes['emoticons'])
urls = {}
for i in xrange(len(img_urls)):
    urls[img_urls.iloc[i]['regex']] = img_urls.iloc[i]['images'][0]['url']

# Put all the emotes into a list of (channel name, emote) pairs.
imgs = []
for i in xrange(len(df)):
    for j in xrange(len(df['emotes'].iloc[i])):
        imgs.append((df['name'].iloc[i], df['emotes'].iloc[i][j]))

channel = []
print a[5] except: print "try语句块中出错了执行这里" #sys.exit(-1) else: print "try语句块中没出错执行这里" finally: print "无论如何都会执行这里,即使中途exit了" import urllib sth_url = "http://www.baidu.com" try: d = urllib.open(sth_url) except IOError: print "该url无法打开" except: print "其他错误" else: content = d.read() print content d.close() import logging logger = logging.getLogger() #创建一个logging对象 hdlr = logging.FileHandler('testlog.txt') #创建存放日志的文件句柄 formater = logging.Formatter('%(asctime)s %(levelname)s %(message)s') #设置日志格式 hdlr.setFormatter(formater) #文件绑定格式