def tgbot_aux_hitokoto():
    try:
        request = urllib2.Request("http://api.hitokoto.us/rand")
        request.add_header(
            "User-Agent",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36"
        )
        request.add_header("Referer", "http://webhook.iii.moe/telegram/bot")
        response = urllib2.urlopen(request)
        j = json.loads(response.read())
        if j["source"] == "":
            s = ""
        else:
            s = "(%s)" % j["source"]
        if j["author"] == "":
            a = u"无名"
        else:
            a = j["author"]
        return u"%s\n——%s %s" % (j["hitokoto"], a, s)
    except Exception:
        return "Hitokoto service temporarily unavailable, please try again later."
def book_API(search):
    book_list = []
    for text in search:
        client_id = "qtW2OiOAlSkWxvXskK5V"
        client_secret = "nG5MjeQ1NM"
        encText = urllib.parse.quote(text)
        url = "https://openapi.naver.com/v1/search/book?query=" + encText + "&display=3&sort=count"
        request = urllib.request.Request(url)
        request.add_header("X-Naver-Client-Id", client_id)
        request.add_header("X-Naver-Client-Secret", client_secret)
        response = urllib.request.urlopen(request)
        rescode = response.getcode()
        if rescode == 200:
            response_body = response.read()
            text_data = response_body.decode('utf-8')
            books = json.loads(text_data)
            book_list.extend(books["items"])
        else:
            print("Error Code:" + str(rescode))  # rescode is an int, so convert before concatenating
    # one pass instead of three identical loops over book_list
    img_list = []
    title_list = []
    link_list = []
    for book in book_list:
        img_list.append(book["image"])
        title_list.append(Markup(str(book["title"])))
        link_list.append(Markup(str(book["link"])))
    return img_list, title_list, link_list
def home():
    # text_str = '''
    # Bong Joon-ho (Korean: 봉준호, Korean pronunciation: [poːŋ tɕuːnho → poːŋdʑunɦo]; born September 14, 1969) is a South Korean film director and screenwriter. He garnered international acclaim for his second feature film Memories of Murder (2003), before achieving commercial success with his subsequent films The Host (2006) and Snowpiercer (2013), both of which are among the highest-grossing films of all time in South Korea.[1]
    # Two of his films have screened in competition at the Cannes Film Festival—Okja, which premiered at the 2017 Cannes Film Festival, and Parasite, which won the Palme d'Or at the 2019 Cannes Film Festival.[2] He became the first Korean director to win the Palme d'Or.[3] Parasite also won Best Foreign Language Film at the 77th Golden Globe Awards, with Bong nominated for Best Director and Best Screenplay for his work.[4] Following the film's nomination for Best International Feature Film at the 92nd Academy Awards, Parasite became the first South Korean film to receive an Academy Award nomination in any category. For his work on the film, he received nominations for Best Director, Best Original Screenplay, and Best Picture.
    # In 2017, Metacritic ranked Bong sixteenth on its list of the 25 best film directors of the 21st century.[5] His films feature timely social themes, genre-mixing, black humor, and sudden mood shifts.[6]
    # '''
    # print( dl(text_str) )
    # print(PI)
    import os
    import sys
    import urllib.parse
    import urllib.request
    import ml
    client_id = ml.CLIENT_ID  # "SkSQh3KCVw5NR7_F9Emw"  # Client ID issued by the Naver developer center
    client_secret = ml.SECRET_KEY  # "3UVO2Hhw8c"  # Client Secret issued by the Naver developer center
    encText = urllib.parse.quote("반갑습니다")  # URL-encode the Korean text (converted into %2D...-style escapes)
    data = "source=ko&target=en&text=" + encText  # build the request parameters
    url = "https://openapi.naver.com/v1/papago/n2mt"  # endpoint URL
    request = urllib.request.Request(url)  # create the request object
    request.add_header("X-Naver-Client-Id", client_id)  # set headers
    request.add_header("X-Naver-Client-Secret", client_secret)
    # equivalent to $.post in JavaScript
    response = urllib.request.urlopen(request, data=data.encode("utf-8"))  # send the request
    rescode = response.getcode()  # get the response code
    if rescode == 200:  # request succeeded
        response_body = response.read()
        print(response_body.decode('utf-8'))
    else:
        print("Error Code:" + str(rescode))  # rescode is an int, so convert before concatenating
    return render_template('index.html')
def baidu_ocr(image):
    import urllib.parse
    import urllib.request
    import urllib
    # obtain the access_token
    host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=CyIgU1tQyZpd4pRPTyKmRGAK&client_secret=6zt4hme0z7rE7INmuX71erN1v8nMhHEn'
    request = urllib.request.Request(host)
    request.add_header('Content-Type', 'application/json; charset=UTF-8')
    response = urllib.request.urlopen(request)
    res = getjsontext(response)
    access_token = res['access_token']
    # data = base64.b64encode(image)
    # print(data)
    params = {"image": image}
    params = urllib.parse.urlencode(params)
    url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic'
    # url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic"
    # url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general'
    url = url + "?access_token=" + access_token
    req = urllib.request.Request(url, params.encode())
    req.add_header("Content-Type", "application/x-www-form-urlencoded")
    # req.add_header(API_KEY, SECRET_KEY)
    resp = urllib.request.urlopen(req)
    content = getjsontext(resp)
    print(content)
    str_list = get_words(content["words_result"])
    print(str_list)
    return str_list
def GeoCoordinates(addr):
    import os
    import sys
    import urllib.parse
    import urllib.request
    client_id = "ZbNWupFQAk_cmBAmLstr"
    client_secret = "5_knjXBU5Z"
    encText = urllib.parse.quote(addr)
    url = "https://openapi.naver.com/v1/map/geocode?query=" + encText  # JSON result
    # url = "https://openapi.naver.com/v1/map/geocode.xml?query=" + encText  # XML result
    request = urllib.request.Request(url)
    request.add_header("X-Naver-Client-Id", client_id)
    request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(request)
    rescode = response.getcode()
    if rescode == 200:
        response_body = response.read()
        mystr = response_body.decode('utf-8')
        # turn JSON booleans into Python literals so eval() can parse the string
        mystr = mystr.replace('true', "True")
        mystr = mystr.replace('false', "False")
        mydic = eval(mystr)
    else:
        print("Error Code:" + str(rescode))
        mydic = {}  # avoid a NameError on the return below when the request fails
    return mydic
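# Aside: the replace('true', 'True')/eval() dance above is unnecessary, since
# json.loads maps JSON true/false/null to Python True/False/None natively.
# A minimal sketch; the sample payload here is illustrative, not the actual
# Naver geocode response shape.
import json

sample = '{"result": {"total": 1, "isRoadAddress": false}}'
parsed = json.loads(sample)
print(parsed["result"]["isRoadAddress"])  # -> False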
def bridgeConnect(self, address, password):
    """Create a connection to a bridge from a client.

    Parameters:
        address -- the IP address to connect to
        password -- the password to send in the payload of the packet

    Notes: this function sends a packet to the server via market encoding
    with the password hidden in the payload. It initiates a connection with
    the bridge by sending a market-encoded URL whose payload is just the
    connect password. After sending the GET request, the function uses the
    returned image (just padding) to initialize the session ID.

    Returns: whatever state you need to keep using headless WebKit
    """
    data = self.assembler.assemble(password)
    encodedData = urlEncode.encodeAsMarket(data)
    request = urllib2.Request(encodedData['url'])
    for cookie in encodedData['cookie']:
        request.add_header('Cookie', cookie)  # header name must not include the colon
    reader = urllib2.urlopen(request)
    image = reader.read()
    # use the returned image to initialize the session ID
    decodedData = imageEncode.decode(image, 'png')
    self.disassembler.disassemble(decodedData)
    self.assembler.setSessionID(self.disassembler.getSessionID())
def call_api():
    """
    Call an api using the Access Token
    :return: the index template with the data from the api in the parameter 'data'
    """
    if 'session_id' in session:
        user = _session_store.get(session['session_id'])
        if not user:
            return redirect_with_baseurl('/')
        if 'api_endpoint' in _config:
            user.api_response = None
            if user.access_token:
                try:
                    request = urllib2.Request(_config['api_endpoint'])
                    request.add_header("Authorization", "Bearer %s" % user.access_token)
                    response = urllib2.urlopen(request)
                    user.api_response = {'code': response.code, 'data': response.read()}
                except urllib2.HTTPError as e:
                    user.api_response = {'code': e.code, 'data': e.read()}
            else:
                user.api_response = None
                print 'No access token in session'
        else:
            user.api_response = None
            print 'No API endpoint configured'
    return redirect_with_baseurl('/')
def lang_transByPapago(text, na_code='en'):
    # set the client credentials
    client_id = Client_ID
    client_secret = Client_Secret
    # URL-encode the source text (prevents Korean characters from being mangled)
    encText = urllib.parse.quote(text)
    # prepare the request body; {0}/{1}: .format-style placeholders, as opposed to %-style formatting
    data = "source={0}&target=ko&text={1}".format(na_code, encText)
    url = "https://openapi.naver.com/v1/papago/n2mt"
    # create the request object
    request = urllib.request.Request(url)
    # set headers
    request.add_header("X-Naver-Client-Id", client_id)
    request.add_header("X-Naver-Client-Secret", client_secret)
    # send the request
    response = urllib.request.urlopen(request, data=data.encode("utf-8"))
    rescode = response.getcode()
    if rescode == 200:
        # success => load the JSON response into a Python object and return it
        # (the Papago response body is JSON, parsed into a dict)
        return json.load(response)
        # response_body = response.read()
        # print(response_body.decode('utf-8'))
    else:
        # request failed
        return {}  # no translation result -> empty dict
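# Hypothetical usage of lang_transByPapago. The nested keys
# ("message" -> "result" -> "translatedText") are an assumption about the
# Papago n2mt response shape; adjust if the live payload differs.
result = lang_transByPapago("Good morning", na_code='en')
if result:
    print(result["message"]["result"]["translatedText"])
else:
    print("translation failed")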
def jamfcall(resource, username, password, method='', data=None):
    # create a new request object with the resource URL
    request = urllib2.Request(resource)
    # add the auth header
    request.add_header('Authorization', 'Basic ' + base64.b64encode(username + ':' + password))
    # override get_method if the request is a POST, PUT, or DELETE
    if method.upper() in ('POST', 'PUT', 'DELETE'):
        request.get_method = lambda: method
    # add a content type if the request is a POST or PUT and there is data
    if method.upper() in ('POST', 'PUT') and data:
        request.add_header('Content-Type', 'text/xml')
        # send the request with data
        response = urllib2.urlopen(request, data)  # fixed typo: "reponse" left "response" undefined below
    else:
        # send the request without data
        response = urllib2.urlopen(request)
    # convert the response to text
    computerxml = response.read()
    # build a new ElementTree from the computer XML and return its root
    root = ET.fromstring(computerxml)
    return root
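# Hypothetical usage of jamfcall: fetch one computer record from a Jamf Pro
# (JSS) server and read a field from the returned ElementTree root. The URL,
# credentials, and element names are illustrative assumptions, not values
# from this codebase.
root = jamfcall('https://jss.example.com:8443/JSSResource/computers/id/1',
                'apiuser', 'apipass')
general = root.find('general')
if general is not None:
    print(general.findtext('name'))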
def authenticate():
    session.clear()
    session['oauth_secret'] = ''
    requestParams = {
        "oauth_callback": "http://192.168.1.5:5000/authorised",
        "oauth_consumer_key": consumer_key,
        "oauth_nonce": str(random.randint(1, 999999999)),
        "oauth_signature_method": "HMAC-SHA1",
        "oauth_timestamp": int(time.time()),
        "oauth_version": "1.0"
    }
    theSig = sign_request(requestParams, "POST", "https://api.twitter.com/oauth/request_token")
    requestParams["oauth_signature"] = theSig
    request = urllib2.Request("https://api.twitter.com/oauth/request_token", "")
    request.add_header("Authorization", create_oauth_headers(requestParams))
    try:
        httpResponse = urllib2.urlopen(request)
    except urllib2.HTTPError as e:
        return e.read()
    responseData = getParameters(httpResponse.read())
    session['oauth_token'] = responseData['oauth_token']
    session['oauth_secret'] = responseData['oauth_token_secret']
    return redirect("https://api.twitter.com/oauth/authorize?oauth_token=" + session['oauth_token'])
def couchpotato_api(method, params=None, use_json=True, dev=False):
    username = get_setting_value('couchpotato_user')
    password = get_setting_value('couchpotato_password')
    if params:
        params = '/?%s' % params
    else:
        params = '/'
    params = params.replace(' ', '%20')
    url = '%s/%s%s' % (couchpotato_url(), method, params)
    request = urllib2.Request(url)
    if username and password:
        base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
    data = urllib2.urlopen(request).read()
    if dev:
        print url
        print data
    if use_json:
        data = json.JSONDecoder().decode(data)
    return data
def api_search(request=request):
    client_id = "L2VNblW5zyeEhe1spHWn"
    client_secret = "wLXwWM8n0C"
    keyword_receive = request.form['keyword_give']
    baseurl = "https://openapi.naver.com/v1/search/movie?query="  # JSON result
    plusurl = keyword_receive
    url = baseurl + urllib.parse.quote(plusurl)
    req = urllib.request.Request(url)  # renamed so the Flask request object is not shadowed
    req.add_header("X-Naver-Client-Id", client_id)
    req.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(req)
    result = response.read()
    r = json.loads(result)
    gus = r['items']
    if gus is None:
        return False  # the None check belongs before the loop, not inside it
    for gu in gus:
        doc = {
            'title': gu['title'],
            'link': gu['link'],
            'image': gu['image'],
            'actor': gu['actor'],
            'director': gu['director']
        }
        db.movies.insert_one(doc)
    return jsonify({'result': 'success'})
def getFileList(studyID):
    url = 'http://wp-p3s-15.ebi.ac.uk:5000/metabolights/ws/studies/{study_id}/files?include_raw_data=false'.format(study_id=studyID)
    request = urllib.request.Request(url)
    request.add_header('user_token', app.config.get('METABOLIGHTS_TOKEN'))
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    j_content = json.loads(content)
    assay_file, sample_file, investigation_file, maf_file = [], '', '', []
    for files in j_content['study']:
        if files['status'] == 'active' and files['type'] == 'metadata_assay':
            assay_file.append(files['file'])
            continue
        if files['status'] == 'active' and files['type'] == 'metadata_investigation':
            investigation_file = files['file']
            continue
        if files['status'] == 'active' and files['type'] == 'metadata_sample':
            sample_file = files['file']
            continue
        if files['status'] == 'active' and files['type'] == 'metadata_maf':
            maf_file.append(files['file'])
            continue
    if assay_file == []:
        print('Failed to load assay file from', studyID)
    if sample_file == '':
        print('Failed to load sample file from', studyID)
    if investigation_file == '':
        print('Failed to load investigation file from', studyID)
    if maf_file == []:
        print('Failed to load MAF file from', studyID)
    return assay_file, investigation_file, sample_file, maf_file
def get_diematic_data():
    s = API_SERVER + '/registers'
    request = urllib2.Request(s)
    request.add_header('Content-Type', 'application/json')
    try:
        response = urllib2.urlopen(request, timeout=10)
        resp_str = response.read()
    except:
        print "problem reading from URL"
        return {}
    # print "response str=", resp_str
    if len(resp_str) > 0:
        try:
            data = json.loads(resp_str)
            print "got data"
        except:
            print "error decoding data"
            return {}
        if data is not None and data != {}:
            return data
        else:
            print "couldn't read proper data"
            return {}
    else:
        print "no proper response"
        return {}
def getImage(title):
    client_id = "fechS4lsKMLVwarW0I01"
    client_secret = "MxwdD119Rv"
    encText = urllib.parse.quote(title)
    image_url = 'https://openapi.naver.com/v1/search/image?query=' + encText
    request = urllib.request.Request(image_url)
    request.add_header("X-Naver-Client-Id", client_id)
    request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(request)
    rescode = response.getcode()
    img_not_found = '<Photo>http://ugimoa.com/xeshop/img/no_images.jpg</Photo>'
    if rescode == 200:
        response_body = response.read()
        locinfo = response_body.decode('utf-8')
        json_data = json.loads(locinfo)
        item = json_data.get('items')
        if item:
            if len(item) == 1:
                j = 0
            else:
                j = random.randint(0, len(item) - 1)
            s_title = remove_tag(item[j].get('title'))
            s_link = remove_tag(item[j].get('link'))
            image_information = '<Photo>' + s_link + '</Photo>'
            # [s_title, s_telephone, s_address, s_roadAddress, s_mapy, s_mapx]
            return image_information
        else:
            return img_not_found
def get_nextbus_time(stop, direction, route):
    '''Returns next arrival time for given route and starting point
       scraped from NextBus.
    '''
    request = urllib2.Request("http://www.nextbus.com/predictor/simplePrediction.shtml?a=georgia-tech&r=" + route + "&d=" + direction + "&s=" + stop)
    request.add_header('User-agent', 'Mozilla/5.0')  # need to fake a user agent or else NextBus will reject the connection
    result = urllib2.urlopen(request)
    response = result.read()
    # scrape the prediction
    soup = BeautifulSoup(response)
    available = True
    if not soup:
        available = False
        result = DEFAULT_MAX_TIME
    if soup.findAll(text='No prediction') != []:
        available = False
        result = DEFAULT_MAX_TIME
    if available:
        prediction = soup.find('td', {'class': "predictionNumberForFirstPred"})
        if prediction and prediction.find('div'):
            result = prediction.find('div').string.split(';')[1].strip()
            if result in ["Arriving", "Departing"]:
                result = 0
        else:
            result = DEFAULT_MAX_TIME  # was [], which would crash int() below
    return int(result)
def authenticate():
    # clear the existing session variables
    session.clear()
    session['oauth_secret'] = ''
    requestParams = {
        "oauth_callback": "http://127.0.0.1:5000/authorised",
        "oauth_consumer_key": consumer_key,
        "oauth_nonce": str(random.randint(1, 999999999)),
        "oauth_signature_method": "HMAC-SHA1",
        "oauth_timestamp": int(time.time()),
        "oauth_version": "1.0"
    }
    receivedSig = signatureRequest(requestParams, "POST", "https://api.twitter.com/oauth/request_token")
    requestParams["oauth_signature"] = receivedSig
    request = urllib2.Request("https://api.twitter.com/oauth/request_token", "")
    request.add_header("Authorization", formulateOauthHeaders(requestParams))
    try:
        httpResponse = urllib2.urlopen(request)
    except urllib2.HTTPError as e:  # "except X as e" also works on Python 2.6+
        return e.read()
def map_jobs(jobs):
    '''
    Send a collection of mapping jobs to the API in order to obtain the
    associated FIGI(s).

    Parameters
    ----------
    jobs : list(dict)
        A list of dicts that conform to the OpenFIGI API request structure.
        See https://www.openfigi.com/api#request-format for more information.
        Note rate-limiting requirements when considering the length of `jobs`.

    Returns
    -------
    list(dict)
        One dict per item in the `jobs` list, conforming to the OpenFIGI API
        response structure. See https://www.openfigi.com/api#response-fomats
        for more information.
    '''
    handler = urllib2.HTTPHandler()
    opener = urllib2.build_opener(handler)
    openfigi_url = 'https://api.openfigi.com/v1/mapping'
    request = urllib2.Request(openfigi_url, data=json.dumps(jobs))  # was json.dumps(Ids), an undefined name
    request.add_header('Content-Type', 'application/json')
    if openfigi_apikey:
        request.add_header('X-OPENFIGI-APIKEY', openfigi_apikey)
    request.get_method = lambda: 'POST'
    connection = opener.open(request)
    if connection.code != 200:
        raise Exception('Bad response code {}'.format(str(connection.code)))  # was response.status_code, also undefined
    return json.loads(connection.read())
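# Hypothetical usage of map_jobs with the OpenFIGI v1 request shape
# (https://www.openfigi.com/api#request-format). The identifiers are
# well-known example values, not data from this project.
jobs = [
    {'idType': 'ID_ISIN', 'idValue': 'US4592001014'},
    {'idType': 'TICKER', 'idValue': 'IBM', 'exchCode': 'US'},
]
results = map_jobs(jobs)
print(results)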
def posttoWP():
    request = urllib2.Request(url)
    request.add_header('Authorization', credentialBing)
    requestOpener = urllib2.build_opener()
    response = requestOpener.open(request)
    results = json.load(response)
    image = results['d']['results'][0]['Thumbnail']['MediaUrl']
    response = requests.get(image, stream=True)
    with open('testimage' + h + m + s + '.jpg', 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response
    print image
    # renamed to wp_url: reassigning `url` would make the `url` on the first
    # line an unbound local variable
    wp_url = "http://www.easyinjury.com/xmlrpc.php"
    ## URL: www.easyinjury.com
    ## username: James
    ## pwd: mUFmNPvaXefAlgVTaTE#B2ku
    wp = wordpresslib.WordPressClient(wp_url, 'James', 'mUFmNPvaXefAlgVTaTE#B2ku')
    wp.selectBlog(0)
    # timestamped name, so images with the same name are unlikely to override each other
    imageSrc = wp.newMediaObject('testimage' + h + m + s + '.jpg')
    img = '/wp-content/uploads/' + yr + '/' + mo + '/testimage' + h + m + s + '.jpg'
    post = wordpresslib.WordPressPost()
    post.title = 'Title'
    post.description = '<img src="' + img + '"/>' + 'Content'
    post.tags = ["wordpress", "lib", "python"]
    # set the second argument to False to save as a draft
    idPost = wp.newPost(post, True)
def get_google_voice(phrase, lang, file_name):
    """
    Send an http request to Google Translate and save the audio file from the
    response with the voiced input phrase.
    Parameters:
        @phrase: phrase to voice.
    Returns:
        If ok - name of created file, else - returns None.
    """
    language = lang  # set the language
    url = "http://translate.google.com/translate_tts"  # Google Translate URL for getting sound
    user_agent = "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5."
    # file_name = "temp.mp3"  # temp file for saving our voiced phrase
    params = urllib.urlencode({'q': phrase, 'tl': language})  # query parameters
    request = urllib2.Request(url, params)  # passing data makes this a POST request, not a GET
    request.add_header('User-Agent', user_agent)  # add the agent as a header
    response = urllib2.urlopen(request)
    if response.headers['Content-Type'] == 'audio/mpeg':  # was checking 'User-Agent', which can never equal a MIME type
        with open(file_name, 'wb') as file:
            file.write(response.read())
        return file_name
    else:
        return None
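# Minimal usage sketch for get_google_voice; the phrase, language code, and
# output file name are arbitrary choices.
saved = get_google_voice("hello world", "en", "hello.mp3")
if saved is not None:
    print("voiced phrase saved to %s" % saved)
else:
    print("no audio returned")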
def run_client(self):
    # initialize the connection
    self.assembler = frame.Assembler()
    # bind to a local address and wait for Tor to connect
    self.torBinder = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.torBinder.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.torBinder.bind(('localhost', CLIENT_LISTEN_PORT))
    self.torBinder.listen(1)
    (self.torSock, address) = self.torBinder.accept()
    # now that we have a Tor connection, start sending data to the server
    self.bridgeConnect(TOR_BRIDGE_ADDRESS, TOR_BRIDGE_PASSWORD)
    self.timeout = datetime.now()
    while 1:
        # wait for data to send from Tor, at most TIMEOUT seconds
        readyToRead, readyToWrite, inError = \
            select.select([self.torSock], [], [], TIMEOUT)
        if readyToRead != []:
            dataToSend = readyToRead[0].recv(1024 * 1000)
            print "Client Sending: {}".format(dataToSend)
            # if there is less than 35 bytes of data to send, then make
            # sure that we still send it
            while dataToSend != '':
                segment = dataToSend[:35]
                dataToSend = dataToSend[35:]
                # put the headers on the data (not the actual function name)
                framed = self.assembler.assemble(segment)
                # encode the data
                encoded = urlEncode.encode(framed, 'market')
                # send the data with headless WebKit
                request = urllib2.Request(encoded['url'])
                for cookie in encoded['cookie']:
                    request.add_header('Cookie', cookie)  # header name must not include the colon
                reader = urllib2.urlopen(request)
                readData = reader.read()
                # if we have received data from the Internet, then send it up to Tor
                decoded = imageEncode.decode(readData, 'png')
                self.disassembler.disassemble(decoded)
                self.timeout = datetime.now()
        else:
            dataToSend = ''
            # put the headers on the data (not the actual function name)
            framed = self.assembler.assemble(dataToSend)
            # encode the data
            encoded = urlEncode.encode(framed, 'market')
            # send the data with headless WebKit
            request = urllib2.Request(encoded['url'])
            reader = urllib2.urlopen(request)
            readData = reader.read()
            # if we have received data from the Internet, then send it up to Tor
            decoded = imageEncode.decode(readData, 'png')
            self.disassembler.disassemble(decoded)
        # if we have not received or sent data for 10 minutes, end the program
        if (datetime.now() - self.timeout).total_seconds() > 60 * 10:
            # close the local socket to Tor
            self.torSock.close()
            break
def account():
    request = urllib2.Request("https://livestreamapis.com/v1/accounts")
    base64string = base64.encodestring(api_key + ':').replace('\n', '')
    request.add_header("Authorization", "Basic " + base64string)
    u = urllib2.urlopen(request)
    response = json.loads(u.read())
    fullName = response[0]['fullName']
    accountId = response[0]['id']
    return render_template('accounts.html', fullName=fullName, accountId=accountId)
def tgbot_aux_ip(ip):
    try:
        # location
        postData = "ip=" + urllib.quote_plus(ip.encode("utf-8"))
        requestBody = urllib2.Request("https://www.ipip.net/ip.html", postData)
        requestBody.add_header("Cookie", "")
        requestBody.add_header(
            "User-Agent",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36"
        )
        requestBody.add_header("Referer", "https://www.ipip.net/ip.html")
        requestBody.add_header("Origin", "https://www.ipip.net")
        response = urllib2.urlopen(requestBody)
        html = response.read()
        if u"请正确输入IP" in html.decode("utf-8"):  # the site's "please enter a valid IP" message
            return "Invalid IP."
        a = re.findall('<div>\s*<span id="myself">\s*([\W\w]*?)\s*</span>', html, re.DOTALL)
        l = re.findall('ip_data = {.+?"latitude":"(.+?)".+?":"(.+?)"', html, re.DOTALL)
        if len(a) < 1:
            return "IP info service temporarily unavailable, please try again later. (Error 0)"
        if len(l) == 2:
            return "IP info service temporarily unavailable, please try again later. (Error 1)"
        # human rate
        requestBodyb = urllib2.Request("https://ip.rtbasia.com/webservice/ipip?ipstr=" + ip)
        requestBodyb.add_header("Referer", "https://www.ipip.net/ip.html")
        responseb = urllib2.urlopen(requestBodyb)
        b = re.findall('<label.*?>(.*?)</label>', responseb.read(), re.DOTALL)
        if len(l) < 1:
            l = [(u"无数据", u"无数据")]  # "no data"
            ld = ""
        else:
            request = urllib2.Request(
                "http://maps.google.com/maps/api/geocode/json?latlng=%s,%s&language=zh-CN&sensor=false" % (l[0][0], l[0][1]))
            request.add_header(
                "User-Agent",
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36"
            )
            response = urllib2.urlopen(request)
            j = json.loads(response.read())
            ld = j["results"][0]["formatted_address"]
        if len(b) < 1:
            b = [u"无数据"]  # "no data"
        # final
        return u"%s, %s, %s, %s" % (ip, a[0].decode("utf-8"), ld, b[0].decode("utf-8").replace(" ", ""))
    except Exception:
        return u"IP info service temporarily unavailable, please try again later. (Error 2)"
def downloadSubmission(formid, submissionid):
    requestURL = 'http://127.0.0.1:8080/ODKAggregate/view/downloadSubmission?'
    requestURL += 'formId=' + formid
    requestURL += '[@version=null%20and%20@uiVersion=null]/'
    requestURL += formid.upper()
    requestURL += '[@key=' + submissionid + ']'
    request = urllib2.Request(requestURL)
    request.add_header('User-agent', 'Mozilla/5.0 (Linux i686)')
    return urllib2.urlopen(request).read()
def sendSparkPOST(url, data):
    request = urllib2.Request(url, json.dumps(data), headers={
        "Accept": "application/json",
        "Content-Type": "application/json"
    })
    request.add_header("Authorization", "Bearer " + bearer)
    contents = urllib2.urlopen(request).read()
    return contents
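# Hypothetical usage of sendSparkPOST: post a message to a Cisco Spark/Webex
# room. The endpoint and payload keys follow the public Webex API; the room
# ID is a placeholder, and `bearer` must already be defined, as assumed above.
contents = sendSparkPOST("https://api.ciscospark.com/v1/messages",
                         {"roomId": "<ROOM_ID>", "text": "Hello from the bot"})
print(contents)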
def __init__(self, studyID, user_token):
    try:
        url = 'https://www.ebi.ac.uk/metabolights/webservice/study/' + studyID
        request = urllib.request.Request(url)
        request.add_header('user_token', user_token)
        response = urllib.request.urlopen(request)
        content = response.read().decode('utf-8')
        self.study_content = json.loads(content)
    except:
        print("can't find study", studyID)
def get(self):
    request = urllib2.Request('%s/%s' % (API_URL, 'candidates'))
    # You need the replace to handle encodestring adding a trailing newline
    # (https://docs.python.org/2/library/base64.html#base64.encodestring)
    base64string = base64.encodestring('%s:%s' % (API_KEY, '')).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    response = urllib2.urlopen(request)
    return json.loads(response.read())
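# For reference, a Python 3 sketch of the same Basic-auth header built with
# base64.b64encode, which, unlike the deprecated encodestring, appends no
# trailing newline, so no .replace('\n', '') is needed. API_URL and API_KEY
# are assumed to be defined as above.
import base64
import urllib.request

req = urllib.request.Request('%s/%s' % (API_URL, 'candidates'))
auth = base64.b64encode(('%s:' % API_KEY).encode('ascii')).decode('ascii')
req.add_header("Authorization", "Basic %s" % auth)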
def get_image():
    # http://10.0.10.173/tmpfs/snap.jpg?user=guest&pwd=automaton
    request = urllib2.Request("http://10.0.10.173/tmpfs/snap.jpg")
    username = '******'
    password = '******'
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    result = urllib2.urlopen(request)
    return send_file(result, mimetype='image/jpg', cache_timeout=1)
def get(self, data):
    request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
    data = data.replace('sunzhg', '/')  # restore '/' characters, presumably escaped as 'sunzhg' in transit
    params = "{\"image\":\"" + data + "\",\"image_type\":\"BASE64\",\"face_field\":\"faceshape,facetype,age,beauty,expression,gender,glasses,race,quality\"}"
    access_token = '24.e350694767f9b2643953f6c2ac212c8a.2592000.1533346847.282335-11488479'
    request_url = request_url + "?access_token=" + access_token
    request = urllib.request.Request(url=request_url, data=params.encode())
    request.add_header('Content-Type', 'application/json')
    response = urllib.request.urlopen(request)
    content = response.read()
    return content.decode()
def request_canned_message(title):
    url = 'http://127.0.0.1:8000/canned'
    data = {
        "topic": title,
    }
    params = json.dumps(data).encode('utf-8')
    request = Request(url, data=params)
    request.add_header('Content-Type', 'application/json')
    reply = urlopen(request).read().decode()
    return reply  # the decoded reply was previously read and then discarded
def get_nextbus_time(stop, direction, route):
    '''Returns next arrival time for given route and starting point
       scraped from NextBus.
    '''
    request = urllib2.Request("http://www.nextbus.com/predictor/simplePrediction.shtml?a=georgia-tech&r=" + route + "&d=" + direction + "&s=" + stop)
    request.add_header('User-agent', 'Mozilla/5.0')  # need to fake a user agent or else NextBus will reject the connection
    try:
        result = urllib2.urlopen(request)
        response = result.read()
    except urllib2.URLError as e:
        # error in reaching the NextBus servers
        return DEFAULT_MAX_TIME
def get_accumulated_cash_back(current_user):
    context = ssl._create_unverified_context()
    # TODO: validate on user creation and strip '.' and '-' from the CPF there
    url = "https://mdaqk8ek5j.execute-api.us-east-1.amazonaws.com/v1/cashback?cpf={}".format(
        str(current_user.cpf).replace('.', '').replace('-', ''))
    request = urllib.request.Request(url)
    request.add_header("token", "'ZXPURQOARHiMc6Y0flhRC1LVlZQVFRnm'")
    response = urllib.request.urlopen(request, context=context)
    data = json.loads(response.read())
    return jsonify({'accumulated': data['body']['credit']})
def get_jwks_data(self):
    request = urllib.request.Request(self.jwks_uri)
    request.add_header('Accept', 'application/json')
    request.add_header('User-Agent', 'CurityExample/1.0')
    try:
        jwks_response = urllib.request.urlopen(request, context=self.ctx)
    except Exception as e:
        print("Error fetching JWKS", e)
        raise e
    return jwks_response.read()
def create_package(base_url, data=None, api_key=None):
    """Post a data dict to the package_create action of the CKAN action API.

    See the documentation of the action API, including each of the available
    actions and the data dicts they accept, here:
    http://docs.ckan.org/en/ckan-1.8/apiv3.html

    :param base_url: the base URL of the CKAN instance to post to,
        e.g. "http://datahub.io/"
    :type base_url: string

    :param data: the data to post (optional, default: {})
    :type data: dictionary

    :param api_key: the CKAN API key to put in the 'Authorization' header of
        the HTTP request (optional, default: None)
    :type api_key: string

    :returns: the dictionary returned by the CKAN API: a dictionary with
        three keys, 'success' (True or False), 'help' (the docstring for the
        action posted to), and either 'result' for a successful request or
        'error' for an unsuccessful one
    :rtype: dictionary
    """
    if data is None:
        # Even if you don't want to post any data to the CKAN API, you still
        # have to send an empty dict.
        data = {}
    path = '/api/3/action/package_create'
    url = urlparse.urljoin(base_url, path)
    request = urllib2.Request(url)
    if api_key is not None:
        request.add_header('Authorization', api_key)
    try:
        response = urllib2.urlopen(request, urllib.quote(json.dumps(data)))
        # The CKAN API returns a dictionary (in the form of a JSON string)
        # with three keys: 'success' (True or False), 'result' and 'help'.
        d = json.loads(response.read())
        assert d['success'] is True
        return d
    except urllib2.HTTPError as e:
        # For errors, the CKAN API also returns a dictionary with three
        # keys: 'success', 'error' and 'help'.
        error_string = e.read()
        try:
            d = json.loads(error_string)
            if type(d) is unicode:
                # Sometimes CKAN returns an error as a JSON string, not a
                # dict; gloss over it here.
                return {'success': False, 'help': '', 'error': d}
            assert d['success'] is False
            return d
        except ValueError:
            # Sometimes CKAN returns a string that is not JSON; gloss over it.
            return {'success': False, 'error': error_string, 'help': ''}
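# Hypothetical usage of create_package, following the docstring above: create
# a minimal dataset on a CKAN instance. The base URL, dataset fields, and API
# key are placeholders.
result = create_package('http://datahub.io/',
                        data={'name': 'my-example-dataset',
                              'title': 'My Example Dataset'},
                        api_key='<YOUR-CKAN-API-KEY>')
if result['success']:
    print('created: %s' % result['result']['name'])
else:
    print('error: %s' % result['error'])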
def index():
    """Obtain a bearer token."""
    encoded_bearer_token = base64.b64encode('%s:%s' % ("WdPaWyppZ8ZfuUJOng9g", "zIamt9Liy4vzqpIuVOdPfh3umI6QWBVVNAqj3uAnMk"))
    request = urllib2.Request(REQUEST_TOKEN_URL)
    request.add_header('Content-Type', 'application/x-www-form-urlencoded;charset=UTF-8')
    request.add_header('Authorization', 'Basic %s' % encoded_bearer_token)
    request.add_data('grant_type=client_credentials')
    response = urllib2.urlopen(request)
    data = json.load(response)
    app.secret_key = escape(data['access_token'])
    return render_template("index.html")
def fetchUrl(url):
    FP_COOKIE = open("mycookies.txt", "r")
    ARGS_COOKIE = FP_COOKIE.readline()
    FP_COOKIE.close()
    try:
        request = urllib2.Request(url)
        request.add_header('Cookie', ARGS_COOKIE)
        response = urllib2.urlopen(request)
        return response.read()
    except:
        print '[error]Fetch html fail'
        print url
        return ''
def send_sms(message, to='858-663-2602'):
    if app.config['TESTING']:
        return 'testing!'
    username = '******'
    password = '******'
    post_params = urllib.urlencode({'From': '858-367-9918',
                                    'To': to,
                                    'Body': message})
    request = urllib2.Request('https://api.twilio.com/2010-04-01/Accounts/AC42de1d02120c4ee461f62f80a06d81f9/SMS/Messages')
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    result = urllib2.urlopen(request, post_params)
    return 'done!'
def trak_api(url, params={}):
    username = get_setting_value('trakt_username')
    password = hashlib.sha1(get_setting_value('trakt_password')).hexdigest()
    params = json.JSONEncoder().encode(params)
    request = urllib2.Request(url, params)
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    response = urllib2.urlopen(request)
    response = response.read()
    return json.JSONDecoder().decode(response)
def couchpotato_proxy(url):
    username = get_setting_value('couchpotato_user')
    password = get_setting_value('couchpotato_password')
    url = '%s/file.cache/%s' % (couchpotato_url(), url)
    request = urllib2.Request(url)
    if username and password:
        base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
    img = StringIO.StringIO(urllib2.urlopen(request).read())
    logger.log('CouchPotato :: Fetching image from %s' % (url), 'DEBUG')
    return send_file(img, mimetype='image/jpeg')
def tmdb_api(id, param='', dev=False):
    url = 'http://api.themoviedb.org/3/movie/' + id
    url += param
    url += '?api_key=' + tmdb_apikey
    request = urllib2.Request(url)
    request.add_header("Accept", "application/json")
    data = urllib2.urlopen(request).read()
    data = json.JSONDecoder().decode(data)
    if dev:
        print url
        print json.dumps(data, sort_keys=True, indent=4)
    return data
def do_get_request(url, key=None):
    request = urllib2.Request(url)
    request.add_header('token', 'hack2016-grupo7')
    try:
        response = urllib2.urlopen(request)
        result = response.read()
        dict_result = json.loads(result)
        if key:
            return jsonify(dict_result[key])
        else:
            return jsonify(dict_result)
    except urllib2.HTTPError as e:
        print str(e)
        abort(500)
def __send_message(self, input_message='', emotion='', strength='', custom_dict=None):
    encoded_text = conv_encoding(input_message)  # renamed from the opaque "aaa"
    payload = "text=" + encoded_text + "&speaker=haruka"  # renamed from "all", which shadows the builtin
    req_data = conv_encoding(payload)
    print req_data
    print encoded_text
    request = urllib2.Request(self.api_url, req_data)
    request.add_header('Content-Type', 'application/x-www-form-urlencoded')
    try:
        response = urllib2.urlopen(request)
    except Exception as e:
        print e
        sys.exit()
    return response
def haredis_pair_notify(self, pair_id=None, notify='update'):
    """
    Notify frontend and backend servers of a pair create, update or delete
    create = start
    update = restart
    delete = stop
    """
    if pair_id is None:
        error = 'Redis Pair notify failure: missing pair_id'
        logger.error(error)
        raise ValueError(error)
    if notify not in ('create', 'update', 'delete'):
        error = 'Redis Pair notify failure: notify value must be create, update or delete'
        logger.error(error)
        raise ValueError(error)
    try:
        haredis_backend_servers = self.config.get('Service Broker Extension', 'haredis_backend_servers')
        haredis_frontend_servers = self.config.get('Service Broker Extension', 'haredis_frontend_servers')
        haredis_notify_user = self.config.get('Service Broker Extension', 'haredis_notify_user')
        haredis_notify_pass = self.config.get('Service Broker Extension', 'haredis_notify_pass')
    except NoOptionError:
        raise
    backend_servers = re.split('[\s,]+', haredis_backend_servers)
    backend_servers_len = len(backend_servers)
    frontend_servers = re.split('[\s,]+', haredis_frontend_servers)
    frontend_servers_len = len(frontend_servers)
    servers = backend_servers + frontend_servers
    notified_servers = 0
    for _, server in enumerate(servers):
        notify_url = "http://{server}:{port}/{notify}/{pair_id}".format(server=server, port=8081, notify=notify, pair_id=pair_id)
        request = urllib2.Request(notify_url)  # was "request = request = ...", a doubled assignment
        # strip the trailing newline that encodestring appends, or the header is malformed
        base64string = base64.encodestring('{}:{}'.format(haredis_notify_user, haredis_notify_pass)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
        try:
            response = urllib2.urlopen(request, timeout=3)
        except urllib2.HTTPError as e:
            logger.error('Redis Pair notify failure: HTTPError {} {}'.format(str(e.code), notify_url))
            continue
        except urllib2.URLError as e:
            logger.error('Redis Pair notify failure: URLError {} {}'.format(str(e), notify_url))
            continue
def cache_resource(self, url):
    request = urllib2.Request(url)
    user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.35 Safari/535.1'
    request.add_header('User-Agent', user_agent)
    handler = urllib2.urlopen(request)
    try:
        resource_type = MIME_TYPES[handler.headers.get('Content-Type')]
        if not resource_type:
            raise UnsupportedResourceFormat("Resource format not found")
    except KeyError:
        raise UnsupportedResourceFormat("Resource format not supported")
    etag = handler.headers.get('ETag')
    last_modified = handler.headers.get('Last-Modified')
    resource_key = self.get_resource_key(url)
    stream = handler.read()
    self.update_resource_params(resource_key, resource_type, etag, last_modified, stream)
    return stream, resource_type
def is_resource_updated(self, url, etag, last_modified):
    no_change = (False, None, None)
    # if there is no ETag, fall back to the 'Last-Modified' header
    if etag is not None and etag != "":
        request = urllib2.Request(url)
        request.add_header('If-None-Match', etag)
    elif last_modified is not None and last_modified != "":
        request = urllib2.Request(url)
        request.add_header('If-Modified-Since', last_modified)
    else:
        return no_change
    try:
        second_try = urllib2.urlopen(request)
    except urllib2.HTTPError as e:
        # if the http code is 304, the resource has not changed
        if e.code == 304:
            return no_change
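# A standalone Python 3 sketch of the conditional-request pattern used above:
# send If-None-Match / If-Modified-Since validators and treat HTTP 304 as
# "not modified". The function name and structure are illustrative, not part
# of the class above.
import urllib.request
import urllib.error

def fetch_if_changed(url, etag=None, last_modified=None):
    request = urllib.request.Request(url)
    if etag:
        request.add_header('If-None-Match', etag)
    elif last_modified:
        request.add_header('If-Modified-Since', last_modified)
    try:
        response = urllib.request.urlopen(request)
        return response.read()  # changed (or no validators were sent)
    except urllib.error.HTTPError as e:
        if e.code == 304:
            return None  # not modified
        raise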
def bing_search(self, query):
    key = 'secret'  # Bing API key
    query = urllib.quote(query)
    # create the credential for authentication
    user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
    credentials = (':%s' % key).encode('base64')[:-1]
    auth = 'Basic %s' % credentials
    url = 'https://api.datamarket.azure.com/Data.ashx/Bing/Search/Composite?Sources=%27web%27&Query=%27' + query + '%27&$top=1&$format=json'
    request = urllib2.Request(url)
    request.add_header('Authorization', auth)
    request.add_header('User-Agent', user_agent)
    request_opener = urllib2.build_opener()
    response = request_opener.open(request)
    response_data = response.read()
    json_result = json.loads(response_data)
    result_count = int(json_result['d']['results'][0]['WebTotal'])
    return result_count
def execute_api_call(api_query):
    # Add the username and password.
    # If we knew the realm, we could use it instead of None.
    top_level_url = "https://api.ebay-kleinanzeigen.de/"
    url = top_level_url + "api/" + api_query
    username = '******'
    password = '******'
    request = urllib2.Request(url)
    # You need the replace to handle encodestring adding a trailing newline
    # (https://docs.python.org/2/library/base64.html#base64.encodestring)
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    result = urllib2.urlopen(request)  # use the opener to fetch the URL
    return json.load(result)
def get_user_info(self, authorization):
    """Get GitCafe user info

    :type authorization: str
    :param authorization: the Authorization header value

    :rtype: dict
    :return: user info
    """
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request(get_config("login.gitcafe.user_info_url"))
    request.add_header("Authorization", authorization)
    user_info = json.loads(opener.open(request).read())
    if user_info.get("error") is not None:
        raise Exception(user_info)
    return user_info
def GetAllWeibo(uid):
    FP_COOKIE = open("cookies.txt", "r")
    ARGS_COOKIE = FP_COOKIE.readline()
    FP_COOKIE.close()
    # for each user, the accountID is needed
    tmpUser = User()
    tmpUser.id = str(uid)
    tmpUser.domain = str(0)
    tmpUser.accountID = tmpUser.domain + tmpUser.id
    if tmpUser.domain == "0":
        FOLLOW_INIT_PATH = "http://weibo.com/u/" + tmpUser.id
        rawcontents = fetchUrl(FOLLOW_INIT_PATH)
        tmpUser.accountID = GetAccountId(tmpUser.id)
        if not tmpUser.accountID.isdigit():
            # print tmpUser.accountID
            print 're problem'
            return (False, 're problem')
        tmpUser.domain = str(tmpUser.accountID)[0:6]
    if tmpUser.accountID == "0":
        print 'get accountID fail'
        return (False, 'get accountID fail')
    eachUser = tmpUser
    # init PATH
    PATH_REAL = "http://weibo.com/p/aj/v6/mblog/mbloglist?id=" + eachUser.accountID + "&domain=" + eachUser.domain + "&page="
    allWeiboList = []
    # fetch up to 9 pages (range(1, 10)) by default
    for page in range(1, 10):
        request = urllib2.Request(PATH_REAL + str(page))
        request.add_header('Cookie', ARGS_COOKIE)
        try:
            response = urllib2.urlopen(request)
            rawcontents = response.read()
        except:
            return (False, 'url read error')
        tmpList = getWeiboList(rawcontents, eachUser.id)
        if len(tmpList) == 0:
            break
        else:
            allWeiboList.extend(tmpList)
    return (True, allWeiboList)
def go(self):
    while not self.stopRequested:
        temp = self.bmp.readTemperature()
        pressure = self.bmp.readPressure() / 100.0
        out = dict()
        out["temp"] = temp
        out["pressure"] = pressure
        try:
            opener = urllib2.build_opener(urllib2.HTTPHandler)
            request = urllib2.Request("http://192.168.5.1:8088/wx/garage/current", data=json.dumps(out))
            request.add_header("Content-Type", "application/json")
            request.get_method = lambda: "PUT"
            opener.open(request)
            self.logger.debug(json.dumps(out))
            time.sleep(2)
        except Exception as e:
            self.logger.error(e)
def login_to_noisite(username, password):
    """
    In order to login to the API, the user must have an existing account with
    neworganizing.com. This function passes the provided username and
    password to the NOI login form, taking CSRF into account; if the
    resulting redirect is not back to the login form, then the authentication
    was successful.

    TODO: Enable the neworganizing.com site to react appropriately to
    application/json requests, and return a proper authentication
    confirmation.
    """
    if config.DEBUG:
        return True
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    login_url = "http://{0}/accounts/login/".format(config.NOISITE_DOMAIN)
    request = urllib2.Request(login_url)
    request.add_header("User-Agent", "Browser")
    response = urllib2.urlopen(request)
    html = response.read()
    doc = BeautifulSoup(html)
    csrf_input = doc.find(attrs=dict(name="csrfmiddlewaretoken"))
    csrf_token = csrf_input["value"]
    params = urllib.urlencode(dict(username=username, password=password, csrfmiddlewaretoken=csrf_token))
    request.data = params
    response = urllib2.urlopen(request)
    if response.geturl() == login_url:
        return False
    else:
        return True
def get_groupscore_by_cluster(cluster):
    ret = {
        "ok": False,
        "msg": "",
        "data": [],
    }
    endpoints = Endpoint.search_agent_endpoint_by_cluster(cluster)
    score = 0
    for endpoint in endpoints:
        p = []
        q = {
            "endpoint": endpoint.endpoint,
            "counter": "net.port.listen/port=%s" % endpoint.id
        }
        p.append(q)
        method = "POST"
        handler = urllib2.HTTPHandler()
        opener = urllib2.build_opener(handler)
        url = config.QUERY_ADDR + "/graph/last"
        # print ("post data==>>>%s" % json.dumps(p))
        request = urllib2.Request(url, data=json.dumps(p))
        request.add_header("Content-Type", "application/json")
        request.get_method = lambda: method
        try:
            connection = opener.open(request)
        except urllib2.HTTPError as e:
            connection = e
        # check; substitute with the appropriate HTTP code
        if connection.code == 200:
            msg = connection.read()
            jsonmsg = json.loads(msg)
            value = jsonmsg[0]["value"]["value"]
            score = score + int(value)
            # j = {
            #     "endpoint": endpoint.endpoint,
            #     "value": score
            # }
            ret['ok'] = True
        else:
            print '{"err":1,"msg":"%s"}' % connection
            ret['ok'] = False
def get_memscore_by_cluster(cluster):
    ret = {
        "ok": False,
        "msg": "",
        "data": [],
    }
    endpoints = Endpoint.search_httpapi_by_cluster(cluster)
    score = 100
    for x in endpoints:
        url = x.endpoint
        method = "GET"
        handler = urllib2.HTTPHandler()
        opener = urllib2.build_opener(handler)
        url = url + "/page/memory"
        request = urllib2.Request(url)
        request.add_header("Content-Type", 'application/json')
        request.get_method = lambda: method
        try:
            connection = opener.open(request)
        except urllib2.HTTPError as e:
            connection = e
        # check; substitute with the appropriate HTTP code
        if connection.code == 200:
            msg = connection.read()
            # print ("msg===>>>" + msg)
            jsonmsg = json.loads(msg)
            if jsonmsg["msg"] == "success":
                if jsonmsg["data"]:
                    x = jsonmsg["data"][1]
                    y = jsonmsg["data"][0]
                    a = x / float(y)
                    if a > 0.2:
                        score = score - 10
                    else:
                        score = score - 20
            ret['ok'] = True
        else:
            print '{"err":1,"msg":"%s"}' % connection
            ret['ok'] = False