def _download_url_to_file(url, dst, hash_prefix, progress):
    """Download ``url`` to ``dst``, verifying that the sha256 digest of the
    payload starts with ``hash_prefix``.

    ``progress`` toggles the tqdm progress bar.  Raises RuntimeError on a
    hash mismatch; the temporary file is always cleaned up.
    """
    file_size = None
    if requests_available:
        u = urlopen(url, stream=True)
        # Content-Length may be absent (e.g. chunked responses); the
        # original indexed it unconditionally and raised KeyError.
        if "Content-Length" in u.headers:
            file_size = int(u.headers["Content-Length"])
        u = u.raw
    else:
        u = urlopen(url)
        meta = u.info()
        if hasattr(meta, 'getheaders'):   # Python 2 httplib message
            content_length = meta.getheaders("Content-Length")
        else:                             # Python 3 email.message.Message
            content_length = meta.get_all("Content-Length")
        if content_length:
            file_size = int(content_length[0])
    # Download into a named temp file so a failed transfer never clobbers
    # an existing dst.
    f = tempfile.NamedTemporaryFile(delete=False)
    try:
        sha256 = hashlib.sha256()
        with tqdm(total=file_size, disable=not progress) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        digest = sha256.hexdigest()
        if digest[:len(hash_prefix)] != hash_prefix:
            raise RuntimeError('invalid hash value (expected "{}", got "{}")'
                               .format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def _download_url_to_file(url, dst, hash_prefix=None, progress=True):
    """Download ``url`` to ``dst``.

    When ``hash_prefix`` is given, the sha256 digest of the payload must
    start with it or RuntimeError is raised.  ``progress`` toggles the
    tqdm progress bar (when tqdm is installed).

    Reusing this from
    https://pytorch.org/docs/stable/_modules/torch/utils/model_zoo.html#load_url
    With modifications.
    """
    # BUG FIX: the original unconditionally reset hash_prefix to None here,
    # silently disabling hash verification for every caller.
    try:
        from tqdm import tqdm
    except ImportError:
        warnings.warn("Install tqdm to see download progress.")
        progress = None

        # Minimal stand-in so the `with tqdm(...)` below still works when
        # tqdm is missing (the original left the name undefined → NameError).
        from contextlib import contextmanager

        @contextmanager
        def tqdm(total=None, disable=True):
            class _NoOpBar:
                def update(self, n):
                    pass
            yield _NoOpBar()

    file_size = None
    if requests_available:
        u = urlopen(url, stream=True)
        # headers is dict-like: membership test, not hasattr() (which
        # checked for an *attribute* named Content-Length and was never true)
        if "Content-Length" in u.headers:
            file_size = int(u.headers["Content-Length"])
        u = u.raw
    else:
        u = urlopen(url)
        meta = u.info()
        if hasattr(meta, 'getheaders'):
            content_length = meta.getheaders("Content-Length")
        else:
            content_length = meta.get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])
    f = tempfile.NamedTemporaryFile(delete=False)
    try:
        if hash_prefix is not None:
            sha256 = hashlib.sha256()
        with tqdm(total=file_size, disable=not progress) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                if hash_prefix is not None:
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        if hash_prefix is not None:
            digest = sha256.hexdigest()
            if digest[:len(hash_prefix)] != hash_prefix:
                raise RuntimeError(
                    'invalid hash value (expected "{}", got "{}")'.format(
                        hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def connection(self):
    """Probe internet connectivity (up to 3 attempts); exit on failure."""
    connected = False
    for _ in range(3):
        try:
            # Bail out if the app was shut down while we were retrying.
            if not self.alive:
                self.exit()
            urlopen('https://example.com')
            connected = True
            break
        # Narrowed from a bare except: the bare form also swallowed
        # SystemExit raised by self.exit(), defeating the shutdown above.
        except Exception:
            pass
    if not connected:
        print('Error: No Connection!')
        self.exit()
async def youtube(self, ctx, *, search):
    """Reply in the channel with the first YouTube result for ``search``."""
    query_string = parse.urlencode({"search_query": search})
    # `requests` has no urlopen(); requests.get + .text gives the decoded
    # page body the regex below expects.
    response = requests.get("http://www.youtube.com/results?" + query_string)
    search_content = response.text
    search_results = re.findall(r"\/watch\?v=\w+", search_content)
    # Guard against IndexError when the page yields no matches.
    if not search_results:
        await ctx.send("No results found.")
        return
    await ctx.send("https://www.youtube.com" + search_results[0])
def update(self):
    """Fetch self.url and (re)parse the stats table into self.data/self.table."""
    # The original built requests.Request(url, headers=...) — whose first
    # positional argument is the HTTP *method* — and then called the
    # nonexistent requests.urlopen().  requests.get sends the headers
    # directly; .content is the raw body for BeautifulSoup.
    start_time = time.time()  # NOTE(review): assigned twice originally and
                              # never read — kept once for compatibility
    html = requests.get(self.url, headers={'User-Agent': 'Mozilla/5.0'}).content
    self.data = BeautifulSoup(html, 'html.parser')
    self.table = self.getTable()
def scrape_with_tor(url_request):
    """Fetch ``url_request`` through a freshly launched Tor SOCKS proxy and
    save the body to image.jpg."""
    SOCKS_PORT = 9050  # You can change the port number
    tor_process = stem.process.launch_tor_with_config(
        config={
            'SocksPort': str(SOCKS_PORT),
        },
    )
    socks.setdefaultproxy(proxy_type=socks.PROXY_TYPE_SOCKS5,
                          addr="127.0.0.1", port=SOCKS_PORT)
    # Route every socket (urllib included) through the Tor proxy.
    socket.socket = socks.socksocket
    # ...
    # requests.urlopen() does not exist; urllib's urlopen honours the
    # patched socket above.  The original also wrote response.content
    # *outside* the with-block — urllib responses have no .content and the
    # connection was already closed — so write the bytes read inside it.
    from urllib.request import urlopen
    with urlopen(url_request) as response:
        try:
            page = response.read()
        except http.client.IncompleteRead as e:
            page = e.partial
    with open('image.jpg', "wb") as fout:
        fout.write(page)
    # ...
    tor_process.kill()
def get_images(self, url, tries=5):
    """Collect <img> source URLs from ``url`` into the self.imgs queue,
    retrying up to ``tries`` times on fetch/parse errors."""
    imgs = None
    try:
        # NOTE(review): .text implies urlopen is a requests-style helper
        # (urllib responses have no .text) — confirm the import alias.
        imgs = bs(urlopen(url).text, 'html.parser').find_all('img')
    except Exception:  # narrowed from a bare except (kept KeyboardInterrupt alive)
        if tries:
            # The recursive retry populates self.imgs itself; nothing to
            # collect locally afterwards.
            self.get_images(url, tries - 1)
    if not imgs:
        return
    for img in imgs:
        _src = img.get('srcset')
        src = img.get('data-lazy-srcset')
        if _src:
            _src = _src.split()[0]
        # Prefer the lazy srcset, falling back to the plain one.
        src = src if src else _src
        if src:
            # "url1 1x, url2 2x" → first URL only
            img_src = src.split(',')[0].split()[0]
            self.imgs.put(img_src)
    if not self.wait_threads:
        self.wait_threads += 1
def getAnimeRecs(anime, recNum):
    """Scrape MyAnimeList for ``anime`` and return a tuple
    (imageUrl, animeTitle, animeDesc) for the ``recNum``-th user
    recommendation."""
    animeSearch = anime.replace(" ", "%20")
    baseUrl = "https://myanimelist.net/search/all?q=" + animeSearch
    # requests.urlopen() does not exist; requests.get().content hands
    # BeautifulSoup the raw page bytes.
    html1 = requests.get(baseUrl).content
    soup = BeautifulSoup(html1, 'html.parser')
    animeUrl = soup.find("article").findAll("a")[0]['href'] + "/userrecs"
    html2 = requests.get(animeUrl).content
    soup = BeautifulSoup(html2, 'html.parser')
    currentRec = soup.findAll("tr")[0].findAll("tr")[4 + recNum]
    # Strip the thumbnail-resize path segment to get the full-size image.
    imageUrl = currentRec.findAll("div")[0].find("img")["data-src"].replace(
        "/r/50x70", "")
    animeTitle = currentRec.findAll("div")[3].text.strip().replace(
        " addpermalink", "")
    animeDesc = currentRec.findAll("div")[4].text.strip().replace(
        "\r", " ").split("\xa0")[0]
    return (imageUrl, animeTitle, animeDesc)
def _download_url_to_file(self, url, dst, hash_prefix, progress=True):
    """Download ``url`` to ``dst``, verifying that the sha256 digest of the
    payload starts with ``hash_prefix``; ``progress`` toggles the tqdm bar."""
    if requests_available:
        # stream=True keeps the body unread so .raw can be chunk-read; the
        # original fetched without it, buffering the whole download first.
        u = urlopen(url, stream=True)
        file_size = int(u.headers["Content-Length"])
        u = u.raw
    else:
        u = urlopen(url)
        meta = u.info()
        if hasattr(meta, 'getheaders'):   # Python 2 message object
            file_size = int(meta.getheaders("Content-Length")[0])
        else:                             # Python 3 email.message.Message
            file_size = int(meta.get_all("Content-Length")[0])
    f = tempfile.NamedTemporaryFile(delete=False)
    try:
        sha256 = hashlib.sha256()
        with tqdm(total=file_size, disable=not progress) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        digest = sha256.hexdigest()
        if digest[:len(hash_prefix)] != hash_prefix:
            raise RuntimeError(
                'invalid hash value (expected "{}", got "{}")'.format(
                    hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def send_msg(mobile, item_name):
    """Send a DingTalk robot text message @-mentioning ``mobile``.

    Robot API endpoint:
    https://oapi.dingtalk.com/robot/send?access_token=6bfc67fbdcdc3e2ea38e11d77a1d1f378188479102fcfcda53686debac0297aa
    :param mobile: phone number to @-mention
    :param item_name: message text content
    :return: None (prints the decoded HTTP response)
    """
    url = "https://oapi.dingtalk.com/robot/send?access_token=6bfc67fbdcdc3e2ea38e11d77a1d1f378188479102fcfcda53686debac0297aa"
    data = {
        "msgtype": "text",
        "text": {
            "content": item_name
        },
        "at": {
            "atMobiles": [mobile],
            "isAtAll": "false"
        }
    }
    # UTF-8-encoded JSON payload.
    json_data = json.dumps(data).encode(encoding='utf-8')
    print(json_data)
    header_encoding = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
        "Content-Type": "application/json"
    }
    # The original built requests.Request(...) and called the nonexistent
    # requests.urlopen(); requests.post sends the payload directly, and
    # .content replaces the urllib-style res.read().
    res = requests.post(url, data=json_data, headers=header_encoding)
    print(res.content.decode(encoding='utf-8'))
def download(self):
    """Download self.repo into the file at self.dir.

    Returns True on success, None on failure (after showing an error
    message).
    """
    with open(self.dir, 'wb') as f:
        try:
            # NOTE(review): .content implies urlopen is a requests-style
            # helper (e.g. an alias of requests.get) — confirm the import.
            f.write(urlopen(self.repo).content)
        except Exception:  # narrowed from a bare except
            self.display_msg('Error: Failed to download', -1)
            return
        return True
def set_max_downloads(self):
    """Scrape self.url and store the site's download counter in
    self.max_downloads (as an int)."""
    # NOTE(review): .text implies urlopen here is a requests-style helper
    # (urllib responses have no .text) — confirm the import alias.
    html = urlopen(self.url).text
    container_div = bs(html, 'html.parser').find(
        'div', {'class': 'media_list'}
    )
    # Last inner <div> holds text like "1,234 downloads": take the first
    # whitespace-separated token and strip thousands separators.
    str_num = container_div.find_all(
        'div')[-1].string.split()[0].replace(',', '')
    self.max_downloads = int(str_num)
def examine_ip(self, ip):
    """Check ``ip`` against the reputation service at self.ip_check.

    Returns True unless *every* suspicious factor is flagged, False when
    all are flagged, and None (implicitly) when the lookup fails.
    """
    # NOTE(review): .json() implies urlopen is a requests-style helper —
    # confirm the import alias used by this module.
    try:
        stat = urlopen(
            self.ip_check.format(ip)).json()['suspicious_factors']
        # NOTE(review): `not all(...)` passes an IP with any single clean
        # factor; if the intent was "clean = no factor set", this should
        # be `not any(...)` — confirm desired semantics.
        return not all([
            stat['is_proxy'],
            stat['is_suspicious'],
            stat['is_tor_node'],
            stat['is_spam']
        ])
    except:
        pass
def download_stock_data(csv_url):
    """Download a CSV from ``csv_url`` and write it, line by line, to the
    local file goog.csv."""
    # requests.urlopen() does not exist; use urllib's urlopen.
    from urllib.request import urlopen
    response = urlopen(csv_url)
    # Decode the raw bytes.  The original did str(bytes) — producing a
    # "b'...'" literal — and then split on the two-character sequence
    # backslash-n that the repr contains, instead of real newlines.
    csv_str = response.read().decode()
    lines = csv_str.split('\n')
    dest_url = r'goog.csv'
    # with-block replaces the manual open/close pair.
    with open(dest_url, 'w') as fx:
        for line in lines:
            fx.write(line + '\n')
def landingCall(self, to, mediaName, mediaTxt, displayNum, playTimes, respUrl,
                userData, maxCallTime, speed, volume, pitch, bgsound):
    """Place a voice landing call through the REST gateway.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL.
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/Calls/LandingCalls?sig=" + sig
    # Authorization header: base64("sid:timestamp").  base64.encodestring()
    # required bytes and was removed in Python 3.9; encodebytes() replaces it.
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP *method* first (the original passed
    # the URL as the method) and requests.urlopen() does not exist, so the
    # request is prepared and sent through a Session below.
    req = requests.Request('POST', url)
    self.setHttpHeader(req)
    req.headers["Authorization"] = auth
    # Build the request body.
    body = '''<?xml version="1.0" encoding="utf-8"?><LandingCall>\
<to>%s</to><mediaName>%s</mediaName><mediaTxt>%s</mediaTxt><appId>%s</appId><displayNum>%s</displayNum>\
<playTimes>%s</playTimes><respUrl>%s</respUrl><userData>%s</userData><maxCallTime>%s</maxCallTime><speed>%s</speed> <volume>%s</volume><pitch>%s</pitch><bgsound>%s</bgsound></LandingCall>\
''' % (to, mediaName, mediaTxt, self.AppId, displayNum, playTimes, respUrl,
       userData, maxCallTime, speed, volume, pitch, bgsound)
    if self.BodyType == 'json':
        body = '''{"to": "%s", "mediaName": "%s","mediaTxt": "%s","appId": "%s","displayNum": "%s","playTimes": "%s","respUrl": "%s","userData": "%s","maxCallTime": "%s","speed": "%s","volume": "%s","pitch": "%s","bgsound": "%s"}''' % (
            to, mediaName, mediaTxt, self.AppId, displayNum, playTimes,
            respUrl, userData, maxCallTime, speed, volume, pitch, bgsound)
    req.data = body  # request *body*; the original put it in URL params
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def ip_addr(self):
    """Return this machine's public IP via api.ipify.org.

    On failure, restart the network manager (rate-limited to once per
    ``network_manager_time`` seconds) and implicitly return None.
    """
    try:
        # NOTE(review): .text implies urlopen is a requests-style helper
        # (urllib responses have no .text) — confirm the import alias.
        ip = str(urlopen('https://api.ipify.org/?format=text', timeout=ip_fetch_timeout).text)
        # Healthy again: clear the restart back-off marker.
        self.last_restarted = None
        return ip
    except:
        # First failure: restart immediately and start the back-off clock.
        if not self.last_restarted:
            self.last_restarted = time()
            self.restart_net_manager()
        else:
            # Subsequent failures: restart only once the window elapsed.
            if time() - self.last_restarted >= network_manager_time:
                self.last_restarted = time()
                self.restart_net_manager()
def __getpage(url):
    """Fetch ``url`` with a desktop Chrome User-Agent.

    Returns the raw body bytes, or the string "error" on any failure.
    """
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 ' \
                 '(KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
    # The original called requests.request(url) (URL passed as the HTTP
    # method), used the nonexistent Request.add_header(), and called the
    # nonexistent requests.urlopen().  requests.get with a headers dict
    # does all of it; .content replaces the urllib-style .read().
    try:
        response = requests.get(url, headers={'User-Agent': user_agent},
                                timeout=60)
    except Exception:  # narrowed from a bare except
        return "error"
    else:
        return response.content
def voiceVerify(self, verifyCode, playTimes, to, displayNum, respUrl, lang,
                userData):
    """Send a voice verification-code call through the REST gateway.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL.
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/Calls/VoiceVerify?sig=" + sig
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP method first; requests.urlopen() does
    # not exist, so prepare + Session.send below.
    req = requests.Request('POST', url)
    self.setHttpHeader(req)
    req.headers["Authorization"] = auth
    # Build the request body.
    body = '''<?xml version="1.0" encoding="utf-8"?><VoiceVerify>\
<appId>%s</appId><verifyCode>%s</verifyCode><playTimes>%s</playTimes><to>%s</to><respUrl>%s</respUrl>\
<displayNum>%s</displayNum><lang>%s</lang><userData>%s</userData></VoiceVerify>\
''' % (self.AppId, verifyCode, playTimes, to, respUrl, displayNum, lang,
       userData)
    if self.BodyType == 'json':
        body = '''{"appId": "%s", "verifyCode": "%s","playTimes": "%s","to": "%s","respUrl": "%s","displayNum": "%s","lang": "%s","userData": "%s"}''' % (
            self.AppId, verifyCode, playTimes, to, respUrl, displayNum, lang,
            userData)
    req.data = body  # request body, not URL params
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def querySubAccount(self, friendlyName):
    """Query a sub-account by its friendly name through the REST gateway.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL.
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/QuerySubAccountByName?sig=" + sig
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP method first; requests.urlopen() does
    # not exist, so prepare + Session.send below.
    req = requests.Request('POST', url)
    self.setHttpHeader(req)
    req.headers["Authorization"] = auth
    # Build the request body.
    body = '''<?xml version="1.0" encoding="utf-8"?><SubAccount><appId>%s</appId>\
<friendlyName>%s</friendlyName>\
</SubAccount>\
''' % (self.AppId, friendlyName)
    if self.BodyType == 'json':
        body = '''{"friendlyName": "%s", "appId": "%s"}''' % (friendlyName,
                                                              self.AppId)
    data = ''
    req.data = body  # request body, not URL params
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def billRecords(self, date, keywords):
    """Fetch billing records for ``date`` filtered by ``keywords``.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL.
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/BillRecords?sig=" + sig
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP method first; requests.urlopen() does
    # not exist, so prepare + Session.send below.
    req = requests.Request('POST', url)
    self.setHttpHeader(req)
    req.headers["Authorization"] = auth
    # Build the request body.
    body = '''<?xml version="1.0" encoding="utf-8"?><BillRecords>\
<appId>%s</appId><date>%s</date><keywords>%s</keywords>\
</BillRecords>\
''' % (self.AppId, date, keywords)
    if self.BodyType == 'json':
        body = '''{"appId": "%s", "date": "%s","keywords": "%s"}''' % (
            self.AppId, date, keywords)
    req.data = body  # request body, not URL params
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def QueryCallState(self, callid, action):
    """Query the state of IVR call ``callid`` through the REST gateway.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL (call id rides in the query string).
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/ivr/call?sig=" + sig + "&callid=" + callid
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP method first; requests.urlopen() does
    # not exist, so prepare + Session.send below.
    req = requests.Request('POST', url)
    self.setHttpHeader(req)
    req.headers["Authorization"] = auth
    # Build the request body.
    body = '''<?xml version="1.0" encoding="utf-8"?><Request>\
<Appid>%s</Appid><QueryCallState callid="%s" action="%s"/>\
</Request>\
''' % (self.AppId, callid, action)
    if self.BodyType == 'json':
        body = '''{"Appid":"%s","QueryCallState":{"callid":"%s","action":"%s"}}''' % (
            self.AppId, callid, action)
    req.data = body  # request body, not URL params
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def MediaFileUpload(self, filename, body):
    """Upload a media file (raw ``body`` bytes) named ``filename``.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL.
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/Calls/MediaFileUpload?sig=" + sig + "&appid=" + self.AppId + "&filename=" + filename
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP method first; requests.urlopen() does
    # not exist, so prepare + Session.send below.
    req = requests.Request('POST', url)
    req.headers["Authorization"] = auth
    if self.BodyType == 'json':
        req.headers["Accept"] = "application/json"
        req.headers["Content-Type"] = "application/octet-stream"
    else:
        req.headers["Accept"] = "application/xml"
        req.headers["Content-Type"] = "application/octet-stream"
    # The octet-stream payload is the request body, not URL params.
    req.data = body
    # Initialize so the except-branch log call cannot hit an unbound name
    # (the original left `data` undefined when the send itself failed).
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def _download_distfile(distfile, url, hash, dest):
    """Download ``distfile`` from ``url``, verify its sha256 against
    ``hash``, and write it to ``dest``.

    On any failure a DownloadError is recorded in DOWNLOAD_ERROR; in all
    cases one entry is popped from DOWNLOAD_QUEUE.
    """
    try:
        try:
            # NOTE(review): .content implies urlopen is a requests-style
            # helper — confirm the import alias.
            distfile_source = urlopen(url).content
        except Exception:
            raise DownloadError("Failed to download %s" % distfile)
        if sha256(distfile_source).hexdigest() != hash:
            raise DownloadError("Got an invalid hash digest for %s" % distfile)
        try:
            # with-block guarantees the file is closed even when write fails.
            with open(dest, 'wb') as distfile_file:
                distfile_file.write(distfile_source)
        except Exception:
            raise DownloadError("Writing %s" % distfile)
        DOWNLOAD_QUEUE.pop()
    # `except DownloadError, errmsg` was Python 2 syntax — a SyntaxError
    # under Python 3.
    except DownloadError as errmsg:
        DOWNLOAD_QUEUE.pop()
        DOWNLOAD_ERROR.append(errmsg)
def check():
    """check if a repo checkout is up-to-date"""
    log("Checking the current revision id for your code.", PROGRESS)
    chdir(ENVIRON)
    # First token of `git show --pretty=oneline --summary` is the HEAD SHA.
    revision_id = do(
        'git', 'show', '--pretty=oneline', '--summary',
        redirect_stdout=True
    ).split()[0]
    log("Checking the latest commits on GitHub.", PROGRESS)
    # .json is a *method* on requests-style responses; the original read the
    # bound method itself, so the ['commit'] subscript below would fail.
    commit_info = urlopen(get_conf('repo-check-url')).json()
    latest_revision_id = commit_info['commit']['sha']
    if revision_id != latest_revision_id:
        exit("A new version is available. Please run `git pull`.")
    log("Your checkout is up-to-date.", SUCCESS)
def ivrDial(self, number, userdata, record):
    """Start an IVR outbound dial to ``number`` through the REST gateway.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL.
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/ivr/dial?sig=" + sig
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # requests.Request takes the HTTP method first; requests.urlopen() does
    # not exist, so prepare + Session.send below.
    req = requests.Request('POST', url)
    req.headers["Accept"] = "application/xml"
    req.headers["Content-Type"] = "application/xml;charset=utf-8"
    req.headers["Authorization"] = auth
    # Build the request body (this endpoint is XML-only).
    body = '''<?xml version="1.0" encoding="utf-8"?>
<Request>
<Appid>%s</Appid>
<Dial number="%s" userdata="%s" record="%s"></Dial>
</Request>
''' % (self.AppId, number, userdata, record)
    req.data = body  # request body, not URL params
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        xtj = xmltojson()
        locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def __init__(self, parent, movie, movie_id):
    """Build a list-entry widget for the IMDb ``movie`` mapping; each field
    is optional, so a missing key only skips the corresponding label."""
    super(IMDBBEntry, self).__init__(parent)
    path = os.path.abspath("extensions/plugins/imdb_finder/imdb_entry.ui")
    uic.loadUi(path, self)
    self.movie_obj = movie
    self.movie_id = movie_id
    self.hovered = False
    self.setStyleSheet("QWidget{background: transparent;}")
    try:
        self.title = movie['title']
        self.lbl_Title.setText(self.title)
    except Exception as e:
        print(e)
    try:
        self.year = movie['year']
        self.lbl_Year.setText(str(self.year))
    except Exception as e:
        print(e)
    try:
        self.genres = movie['genres']
        self.lbl_Genre.setText(str(self.genres))
        self.lbl_Cast.setText(str(self.movie_id))
    except Exception as e:
        print(e)
    try:
        url = movie['cover url']
        # requests.urlopen() does not exist; requests.get().content gives
        # the raw image bytes for QImage.
        data = requests.get(url).content
        image = QImage()
        image.loadFromData(data)
        self.lbl_img.setPixmap(QPixmap(image))
    except Exception as e:
        # Fall back to the placeholder thumbnail on any fetch/decode error.
        self.lbl_img.setPixmap(
            QPixmap("extensions/plugins/imdb_finder/icon_no_preview.png"))
        print(e)
    self.show()
def _download_url_to_file(url, dst):
    """Download ``url`` to ``dst`` with a tqdm progress bar (no hash check)."""
    if requests_available:
        # stream=True keeps the body unread so .raw can be chunk-read; the
        # original fetched without it, buffering the whole download first.
        u = urlopen(url, stream=True)
        file_size = int(u.headers["Content-Length"])
        u = u.raw
    else:
        u = urlopen(url)
        meta = u.info()
        if hasattr(meta, 'getheaders'):   # Python 2 message object
            file_size = int(meta.getheaders("Content-Length")[0])
        else:                             # Python 3 email.message.Message
            file_size = int(meta.get_all("Content-Length")[0])
    f = tempfile.NamedTemporaryFile(delete=False)
    # try/finally so a failed transfer doesn't leak the temp file (the
    # original left it behind on any exception).
    try:
        with tqdm(total=file_size) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
        f.close()
        shutil.move(f.name, dst)
    finally:
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def searchForSet(retX, retY, setNum, yr, numPce, origPrc):
    """Query the Google Shopping API for LEGO set ``setNum`` and append
    feature rows [yr, numPce, newFlag, origPrc] to ``retX`` and the matching
    selling prices to ``retY``."""
    # Sleep ten seconds between calls to respect API rate limits.
    sleep(10)
    # Assemble the query URL.
    myAPIstr = 'get from code.google.com'
    searchURL = 'https://www.googleapis.com/shopping/search/v1/public/products?\
key=%s&country=US&q=lego+%d&alt=json' % (myAPIstr, setNum)
    # requests.urlopen() does not exist; use urllib's urlopen.
    from urllib.request import urlopen
    pg = urlopen(searchURL)
    # json.load takes the file-like object itself (json.loads takes a str);
    # the original json.load(pg.read()) mixed the two and raised.
    retDict = json.load(pg)
    # Walk every returned item.
    for i in range(len(retDict['items'])):
        try:
            currItem = retDict['items'][i]
            # Flag new-condition products.
            if currItem['product']['condition'] == 'new':
                newFlag = 1
            else:
                newFlag = 0
            # Inspect each inventory entry for this product.
            listOfInv = currItem['product']['inventories']
            for item in listOfInv:
                sellingPrice = item['price']
                # Prices below 50% of the original are treated as
                # incomplete sets and skipped.
                if sellingPrice > (origPrc * 0.5):
                    print('%d\t%d\t%d\t%f\t%f' % (yr, numPce, newFlag,
                                                  origPrc, sellingPrice))
                    # `origPce` in the original was an undefined name
                    # (typo for origPrc) and raised NameError.
                    retX.append([yr, numPce, newFlag, origPrc])
                    retY.append(sellingPrice)
        except Exception:  # narrowed from a bare except
            print('problem with item %d' % i)
def CallResult(self, callSid):
    """Fetch the result record of call ``callSid`` through the REST gateway.

    Returns the decoded response dict, or {'172001': '网络错误'} on any
    network/parse failure.
    """
    self.accAuth()
    nowdate = datetime.datetime.now()
    self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
    # sig = upper-case MD5 of AccountSid + AccountToken + timestamp.
    signature = self.AccountSid + self.AccountToken + self.Batch
    sig = hashlib.new('md5', signature.encode('utf-8')).hexdigest().upper()
    # Build the request URL (call sid rides in the query string).
    url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/CallResult?sig=" + sig + "&callsid=" + callSid
    # Authorization header; encodebytes() replaces the removed encodestring().
    src = self.AccountSid + ":" + self.Batch
    auth = base64.encodebytes(src.encode('utf-8')).strip()
    # This endpoint takes no body, so issue a GET; the original passed the
    # URL as requests.Request's method and called the nonexistent
    # requests.urlopen().
    req = requests.Request('GET', url)
    self.setHttpHeader(req)
    body = ''
    req.headers["Authorization"] = auth
    data = ''
    try:
        with requests.Session() as session:
            res = session.send(req.prepare())
        data = res.content  # requests responses have no .read()/.close()
        if self.BodyType == 'json':
            locations = json.loads(data)
        else:
            xtj = xmltojson()
            locations = xtj.main(data)
        if self.Iflog:
            self.log(url, body, data)
        return locations
    except Exception as error:
        if self.Iflog:
            self.log(url, body, data)
        return {'172001': '网络错误'}
def fetch_ip(self):
    """Fetch self.ip_url and return the formatted record, or None when the
    payload lacks 'ipPort' or the lookup fails."""
    try:
        # NOTE(review): .json() implies urlopen is a requests-style helper —
        # confirm the import alias.
        data = urlopen(self.ip_url).json()
        return self.format(data) if 'ipPort' in data else None
    except Exception:  # narrowed from a bare except; failure → explicit None
        return None
def FindDetails(zipcode):
    """Look up congressional representatives for ``zipcode``.

    Returns the HTTP response object (JSON output requested); the original
    discarded it, making the function a no-op for callers.
    """
    url_str = "https://whoismyrepresentative.com/getall_mems.php?zip="
    url_str1 = "&output=json"
    # requests.urlopen() does not exist; requests.get performs the lookup.
    response = requests.get(url_str + zipcode + url_str1)
    return response
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2015-10-22

@author: zhang.xiuhai
'''
import requests
# `parse` was used below without being imported, and requests.urlopen()
# does not exist — urllib provides both pieces.
from urllib import parse, request

# Base URL being accessed
url = 'http://httpbin.org/get'

# Dictionary of query parameters (if any)
parms = {'name1': 'value1', 'name2': 'value2'}

# Encode the query string
querystring = parse.urlencode(parms)

# Make a GET request and read the response
u = request.urlopen(url + '?' + querystring)
resp = u.read()
def update(_, settings, silent=False):
    """Check GitHub for a newer TAIMAU commit; optionally download the
    master zip, overwrite src/, and record the new commit SHA in `settings`.

    NOTE(review): this is Python 2 code (print statements, urllib2,
    tkMessageBox, StringIO) and will not run under Python 3 as-is.
    """
    try:
        response = urllib2.urlopen("https://github.com/Ripley6811/TAIMAU")
    except urllib2.URLError:
        if not silent:
            title = u'Can not connect to GitHub.'
            message = title + u'\nTry again later.'
            tkMessageBox.showerror(title, message)
        return
    html = response.read()
    # Scrape the latest commit SHA out of the repo page HTML: the first
    # 'commit/<sha>' href on the page.
    prestring = 'commit/'
    start = html.find(prestring) + len(prestring)
    end = html.find('"', start)
    if _.debug:
        print start, end, html[start: end]
    SHA = html[start: end]
    # Check if commit number is stored in settings.
    # Save and exit if it is not there.
    try:
        if not silent:
            print u"Settings SHA:"
            print u"\t" + settings.load()['commit']
    except KeyError:
        settings.update(commit = SHA)
        return
    if not silent:
        print u"Latest commit SHA:"
        print "\t" + SHA
    #==========================================================================
    # Compare SHA in settings to web site latest
    #==========================================================================
    if SHA == settings.load()['commit']:
        if not silent:
            title = message = u'Program is up to date!'
            tkMessageBox.showinfo(title, message)
        return
    else:
        title = u'New version available!'
        message = u'Current:\n ' + settings.load()['commit']
        message += u'\nLatest:\n ' + SHA
        message += u'\n\nNew version is available.\nUpdate now?'
        confirm = tkMessageBox.askyesno(title, message)
        if not confirm:
            return
        else:
            # Download and unpack the repository snapshot.
            # NOTE(review): zipr.content implies urlopen here is a
            # requests-style helper — confirm the import alias.
            src_url = "https://github.com/Ripley6811/TAIMAU/archive/master.zip"
            zipr = urlopen(src_url)
            zf = ZipFile(StringIO(zipr.content))
            zf.extractall()
            # Copy files from master to src
            #XXX: From http://stackoverflow.com/questions/7419665/python-move-and-overwrite-files-and-folders
            root_src_dir = os.path.join(os.getcwd(),"TAIMAU-master\\src")
            root_dst_dir = os.path.join(os.getcwd(),"src")
            for src_dir, dirs, files in os.walk(root_src_dir):
                dst_dir = src_dir.replace(root_src_dir, root_dst_dir)
                if not os.path.exists(dst_dir):
                    os.mkdir(dst_dir)
                for file_ in files:
                    src_file = os.path.join(src_dir, file_)
                    dst_file = os.path.join(dst_dir, file_)
                    if os.path.exists(dst_file):
                        os.remove(dst_file)
                    shutil.move(src_file, dst_dir)
            # Delete the temp repo folder.
            shutil.rmtree(os.path.join(os.getcwd(),"TAIMAU-master"))
            # Update commit SHA in settings file.
            settings.update(commit = SHA)
            # Prompt user to restart program.
            title = u'Restart required.'
            message = u'Restart the program for latest version.'
            tkMessageBox.showwarning(title, message)
def html():
    # NOTE(review): broken stub — requests has no urlopen() and it is
    # called with no URL; the result is also only bound to a local and
    # never returned. Left untouched pending the author's intent.
    html = requests.urlopen()
def read(self):
    """Grab one frame from the HTTP image endpoint described by self.req.

    Returns (True, frame) with the frame decoded by OpenCV in color mode.
    """
    # requests.urlopen() does not exist; urllib's urlopen accepts either a
    # URL string or a urllib.request.Request.
    # NOTE(review): assumes self.req is compatible with urllib — confirm.
    from urllib.request import urlopen
    response = urlopen(self.req)
    img_array = np.asarray(bytearray(response.read()), dtype=np.uint8)
    frame = cv2.imdecode(img_array, 1)  # 1 = cv2.IMREAD_COLOR
    return True, frame
import spidermonkey
import requests
import PyV8
# requests.urlopen() does not exist; fetch the script with urllib instead.
from urllib.request import urlopen

js = spidermonkey.runtime()
js_ctx = js.new_context()
script = urlopen('http://etherhack.co.uk/hashing/whirlpool/js/whirlpool.js').read()
js_ctx.eval_script(script)
js_ctx.eval_script('var s = "abc"')
js_ctx.eval_script('print(HexWhirlpool(s))')