def _request_load_time():
    """Returns request load time"""
    start = time.time()
    requests.get(POSTS_URL)
    end = time.time()
    load_time = end - start
    return load_time
def _request_load_time():
    """Returns request load time"""
    initial = time.time()
    requests.get(COMMENTS_URL)
    final = time.time()
    load_time = final - initial
    return load_time
def test_last_modified(self):
    app = App()

    @app.get('/last-modified')
    def handler():
        now = datetime.datetime.utcnow().replace(microsecond=0)
        return "content", ("Last-Modified", now)

    res = get(app, '/last-modified')
    assert res['body'] == 'content'
    assert res['status'] == '200 OK'

    now = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")
    res = get(app, '/last-modified', headers={"If-Modified-Since": now})
    assert res['body'] == 'content'
    assert res['status'].startswith('200')

    yesterday = datetime.datetime.utcnow() + datetime.timedelta(days=-1)
    yesterday = yesterday.strftime("%a, %d %b %Y %H:%M:%S GMT")
    res = get(app, '/last-modified', headers={"If-Modified-Since": yesterday})
    assert res['body'] == ''
    assert res['status'].startswith('304')

    res = get(app, '/last-modified', headers={"If-Modified-Since": 'yesterday'})
    assert res['body'] == 'content'
    assert res['status'].startswith('200')
def test_event(self):
    app = App()

    @app.get('/')
    def home(emitter):
        emitter.emit('foo', bar='visited')

    @app.on('foo')
    def handle_foo(bar, cookies):
        cookies.set('event', bar)

    @app.on(200)
    def handle_200(cookies):
        cookies.set('code', 200)

    @app.on(404)
    def handle_404(res, req):
        res.body = '%s not found' % req.path

    res = get(app, '/')
    assert res['cookies']['event'].value == 'visited'
    assert res['cookies']['code'].value == '200'

    res = get(app, '/foo')
    assert res['status'] == '404 Not Found'
    assert res['body'] == '/foo not found'
def test_resource_module(self):
    app = App()
    from resources import post
    app.resource(module=post)

    res = get(app, '/resources/post/314')
    assert res['status'] == '200 OK'
    assert res['body'] == 'post: 314'

    app = App()
    from resources import post
    app.resource('/v1/post', post)

    res = get(app, '/v1/post/3141')
    assert res['status'] == '200 OK'
    assert res['body'] == 'post: 3141'

    res = get(app, '/v1/post/31415/downvote')
    assert res['status'].startswith('404')

    res = patch(app, '/v1/post/31415/upvote')
    assert res['status'].startswith('200')
    assert res['body'] == 'upvote: 31415'
def test_type(self):
    app = App()

    @app.get('/test/<bar>')
    def index(bar: int, foo: int = 0):
        return str(bar + foo)

    assert get(app=app, path='/test/200')['body'] == '200'
    response = get(app=app, path='/test/200', query={"foo": 1})
    assert response['body'] == '201'
def resolve_conflicts(self):
    """
    This is our consensus algorithm. It resolves conflicts by
    replacing our chain with the longest chain in the network.

    :return: <bool> True if our chain was replaced, False if not
    """
    neighbours = self.nodes
    new_chain = None

    # We're only looking for chains longer than ours
    max_length = len(self.chain)

    # Grab and verify the chains from all the nodes in our network
    for node in neighbours:
        response = requests.get(f'http://{node}/chain')
        if response.status_code == 200:
            length = response.json()['length']
            chain = response.json()['chain']

            # Check if the length is longer and if the chain is valid
            if length > max_length and self.valid_chain(chain):
                max_length = length
                new_chain = chain

    # Replace our chain if we discovered a longer valid chain
    if new_chain:
        self.chain = new_chain
        return True

    return False
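# A minimal usage sketch of resolve_conflicts, assuming a Blockchain class
# that exposes the `nodes`, `chain`, and `valid_chain` members referenced
# above, with neighbours serving JSON containing 'length' and 'chain' keys
# at /chain. The class name and node address here are hypothetical.
blockchain = Blockchain()
blockchain.nodes.add('192.168.0.5:5000')
if blockchain.resolve_conflicts():
    print('Our chain was replaced by a longer valid chain')
else:
    print('Our chain is authoritative')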
def zen():
    """
    Github Quotes
    :return:
    """
    _api_method = 'zen'
    return request.get(_api_method)
def qiushi_spider(self):
    # page = 1
    while True:
        if self.q.empty():
            break
        else:
            page = self.q.get()
            print('qiushi_spider=', self.threadID, ',page=', str(page))
            url = 'http://www.qiushibaike.com/8hr/page/' + str(page)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
                'Accept-Language': 'zh-CN,zh;q=0.8'
            }
            # retry the request up to four times
            timeout = 4
            while timeout > 0:
                timeout -= 1
                try:
                    content = requests.get(url, headers=headers)
                    data_queue.put(content.text)
                    break
                except Exception as e:
                    print('qiushi_spider', e)
            else:
                # all retries were exhausted without a successful break
                print('timeout', url)
def search_string():
    url = ""
    r = requests.get(url)
    data = r.json()
    # the decoded JSON is a list of dicts, so use subscripting
    # rather than attribute access
    return (data[0]['name'], data[0]['address'], data[0]['city'],
            data[0]['state'], data[0]['postal_code'],
            data[0]['review_count'], data[0]['stars'])
def query_index(index_name):
    url = BASE_URL + '/' + index_name
    resp = get(url)
    if resp.code == 200:
        return resp.read().decode('utf-8')
    else:
        return 400
def main():
    text = requests.get('https://adventofcode.com/2020/day/18/input').text
    inputs = [line.replace(' ', '') for line in text.strip().split('\n')]

    print('* Part One:',
          sum(shunting_yard(string=line,
                            isoperator=lambda a: a in ('+', '*'),
                            operatorgt=lambda a, b: a != '(',
                            operatordict={'+': add, '*': mul})
              for line in inputs))

    print('** Part Two:',
          sum(shunting_yard(string=line,
                            isoperator=lambda a: a in ('+', '*'),
                            operatorgt=lambda a, b: a == '+' and b == '*',
                            operatordict={'+': add, '*': mul})
              for line in inputs))
def resolve_conflicts(self):
    """
    Consensus algorithm to resolve conflicts
    by adopting the longest chain in the network.

    :return: <bool> True if our chain was replaced, False otherwise
    """
    # Walk all neighbour nodes and check the validity of their chains;
    # if a longer valid chain is found, replace our own chain with it.
    neighbours = self.nodes
    new_chain = None

    # we're only looking for chains longer than ours
    max_length = len(self.chain)

    # grab and verify the chains from all the nodes in our network
    for node in neighbours:
        response = requests.get(f'http://{node}/chain')
        if response.status_code == 200:
            length = response.json()['length']
            chain = response.json()['chain']

            # check if the length is longer and the chain is valid
            if length > max_length and self.valid_chain(chain):
                max_length = length
                new_chain = chain

    # replace our chain if we discovered a new, valid chain longer than ours
    if new_chain:
        self.chain = new_chain
        return True

    return False
def main():
    text = requests.get('https://adventofcode.com/2020/day/25/input').text
    card_key, door_key = list(map(int, text.strip().splitlines()))
    card_lsize = loop_size(card_key)
    door_lsize = loop_size(door_key)
    ekey = pow(door_key, card_lsize, 20201227)
    print('* Part One:', ekey)
def get_image(cls, url):
    """
    Returned Image instance has response url.
    This might be different than the url param because of redirects.
    """
    from PIL.ImageFile import Parser as PILParser

    length = 0
    raw_image = None
    with closing(requests.get(url, stream=True)) as response:
        response.raise_for_status()
        response_url = response.url
        parser = PILParser()
        for chunk in response.iter_content(config.CHUNK_SIZE):
            length += len(chunk)
            if length > config.IMAGE_MAX_BYTESIZE:
                del parser
                raise cls.MaxBytesException
            parser.feed(chunk)
            # comment this to get the whole file
            if parser.image and parser.image.size:
                raw_image = parser.image
                del parser  # free some memory
                break
        # or this to get just the size and format
        # raw_image = parser.close()
    if length == 0:
        raise cls.ZeroBytesException
    if not raw_image:
        raise cls.NoImageException
    image = Image(response_url, raw_image.size, raw_image.format)
    return image
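# Hypothetical usage of get_image; the owning class name and the URL are
# illustrative only, and the exception types come from that class as above.
try:
    image = ImageFetcher.get_image('https://example.com/picture.jpg')
except ImageFetcher.MaxBytesException:
    print('image exceeded the configured byte limit')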
def main():
    text = requests.get('https://adventofcode.com/2020/day/19/input').text
    inputs = text.strip().replace('"', '').split('\n\n')
    rules = to_rules(inputs[0])
    messages = inputs[1].splitlines()
    print('* Part One:', part_one(rules, messages))
    print('** Part Two:', part_two(rules, messages))
def start_requests(self):
    data = {
        "shopIds": "288146005",
        "catId": "txd_10017653",
        "catIds": "[{\"backendCatId\":\"127530071,127530073\",\"catId\":\"txd_10017653\",\"categoryType\":\"3\",\"displayProperties\":\"0\",\"enableNisitc\":\"0\",\"enableOrder\":\"0\",\"extend\":{\"hasInventoryItemCount\":\"26\",\"noInventoryItemCount\":\"0\",\"hasInventoryItemCountStrategy\":\"GICfBE\",\"noInventoryItemCountStrategy\":\"GICfBE\"},\"firstCatId\":\"txd_10017249\",\"isIgraph\":\"0\",\"itemCount\":\"26\",\"noInventoryItemCount\":\"0\",\"parentCatId\":\"txd_10017274\",\"ruleIds\":[],\"ruleWeight\":\"0\",\"tags\":\"\",\"title\":\"家常叶菜\",\"totalItemCount\":\"26\",\"type\":\"0\"}]",
        "pagination": "-1-51-1-0",
        "busiType": "classify",
        "order": "",
        "needProperties": 0
    }
    params = {
        'jsv': '2.5.0',
        'appKey': 12574478,
        't': int(time.time() * 1000),
        'sign': 'dccded4f1e98f36940da3f6a723fbe46',
        'v': 1.0,
        'dataType': 'jsonp',
        'timeout': 10000,
        'api': 'mtop.wdk.classify.txdqueryclassifypage',
        'jsonpIncPrefix': 'weexcb',
        'ttid': '2020@weex_h5_1.0.36',
        'type': 'jsonp',
        'callback': 'mtopjsonpweexcb6',
        'data': str(data)
    }
    response = requests.get(
        url='https://h5api.m.taobao.com/h5/mtop.wdk.classify.txdqueryclassifypage/1.0/',
        params=params)
    a = 1  # leftover breakpoint anchor from debugging
def get_twitter_img(user):
    url = "https://twitter.com/" + user
    # note the space after html.js so the url is passed as an argument
    html = subprocess.getoutput("phantomjs html.js " + url)
    image = find_between(html, '<img class="ProfileAvatar-image " src="', '" alt="')
    r = requests.get(image)
    with open('Twitter.jpg', 'wb') as f:
        f.write(r.content)
def get_sites(target):
    url = URL + "/same?s=" + target
    res = requests.get(url)
    pattern = re.compile(r"<a.*?href=\"(.*?)\".*?rel=\"nofollow\">\d+</a>")
    pages = pattern.findall(res.text)
    for p in pages:
        pass
def maps(self):
    response = requests.get(
        "https://maps.googleapis.com/maps/api/geocode/json?address=adresse+openclassrooms&key=AIzaSyALFscZvQOVMBXm_0hwRR5EQcqaZLuTpE0"
    )
    if response.ok:
        return response.text
    else:
        return 'Bad Response'
def main():
    text = requests.get('https://adventofcode.com/2020/day/20/input').text
    inputs = text.strip().replace('Tile ', '').split('\n\n')
    tiles = to_tiles(inputs)
    adjacent, adjacent_tileside = connect_adjacent(tiles)
    tileid_image = reassemble_tileid_image(adjacent)
    print('* Part One:', part_one(tileid_image))
    print('** Part Two:', part_two(tiles, adjacent_tileside, tileid_image))
def test_params(self):
    app = App()

    @app.get('/')
    def index(q: str):
        return q

    assert get(app=app, path='/', query={"q": "str"})['body'] == 'str'
def get(self, *args, **kwargs):
    """
    :param github_username:
    :return:
    """
    content = request.get(self._api_method, **kwargs)
    return GithubUserModel(content)
def test_ajax(self):
    app = App()

    @app.get('/api')
    def ajax(req):
        return "ajax" if req.is_ajax else "not ajax"

    res = get(app, '/api', headers={"X-Requested-With": "XMLHttpRequest"})
    assert res['body'] == 'ajax'
def test_cookie(self):
    app = App()

    @app.get('/test')
    def test(cookies):
        return cookies.get('id')

    @app.get('/test-set')
    def test2(cookies):
        cookies.set('foo', 'bar')
        return ''

    @app.get('/test-expires')
    def test3(cookies):
        cookies.set_for_30_days('foo', 'bar', httponly=True)
        return ''

    @app.get('/test-delete')
    def test4(cookies):
        cookies.delete('id')
        return ''

    assert get(app, '/test', cookies={"id": "foo"})['body'] == "foo"

    expected = [('Content-Type', 'text/html; charset=utf-8'),
                ('Set-Cookie', 'foo=bar')]
    res = get(app, '/test-set', cookies={"id": "foo"})
    assert res["status"] == "200 OK"
    assert res["headers"] == expected

    fmt = "%a, %d %b %Y %H:%M:%S GMT"
    expires = (datetime.datetime.utcnow() + datetime.timedelta(days=30)).strftime(fmt)
    expected = [('Content-Type', 'text/html; charset=utf-8'),
                ('Set-Cookie', 'foo=bar; expires=%s; httponly' % expires)]
    response = get(app, '/test-expires', cookies={"id": "foo"})
    assert response['headers'] == expected

    expires = datetime.datetime.utcfromtimestamp(0).strftime(fmt)
    expected = [('Content-Type', 'text/html; charset=utf-8'),
                ('Set-Cookie', 'id=; expires=%s' % expires)]
    res = get(app, '/test-delete', cookies={"id": "foo"})
    assert res["status"] == "200 OK"
    assert res["headers"] == expected
def test_session(self):
    app = App()

    @app.get('/test')
    def test(session):
        if session.get('userid'):
            return 'userid: %s' % session.get('userid')
        return 'not logged in'

    @app.post('/login')
    def login(body, session):
        if body['username'] == 'admin' and body['passwd'] == 'secret':
            session.set('userid', 123)
            return 'logged in'
        return 'failed'

    @app.post('/logout')
    def logout(session):
        if session.get('userid'):
            session.destroy()
            return 'logged out'

    # the username must match the one checked in the login handler
    res = post(app, '/login', {'username': 'admin', 'passwd': 'secret'})
    assert res['status'] == '200 OK'
    assert res['body'] == 'logged in'
    assert res['headers'][1][0] == 'Set-Cookie'
    assert res['headers'][1][1].startswith('ksid=')
    sid = res['headers'][1][1][5:]

    res = get(app, '/test')
    assert res['status'] == '200 OK'
    assert res['body'] == 'not logged in'

    res = get(app, '/test', cookies={"ksid": sid})
    assert res['status'] == '200 OK'
    assert res['body'] == 'userid: 123'

    res = post(app, '/logout', cookies={"ksid": sid})
    assert res['status'] == '200 OK'
    assert res['body'] == 'logged out'

    res = get(app, '/test', cookies={"ksid": sid})
    assert res['status'] == '200 OK'
    assert res['body'] == 'not logged in'
def download(url):
    resp = get(url)
    content_type = resp.headers.get('Content-Type')
    # gb2312 is the charset name; "gbk2312" would never match
    charset = re.findall(r'(gbk|gb2312|utf-8)', content_type, re.I)[0]
    if resp.code == 200:
        html = resp.read().decode(charset)
        parse(html)
def main():
    text = requests.get('https://adventofcode.com/2020/day/24/input').text
    inputs = [
        re.findall('e|se|sw|w|nw|ne', line)
        for line in text.strip().splitlines()
    ]
    # np.bool was removed in recent NumPy releases; the builtin bool works
    tiles = np.zeros((200, 200), bool)
    print('* Part One:', part_one(tiles, inputs))
    print('** Part Two:', part_two(tiles))
def checking(self, emails):
    headers = {'User-Agent': self.my_fake_s.chrome}
    urls = ""  # url of the checker API (left blank in the source)
    web = requests.get(urls + emails, headers=headers)
    # compare against the decoded body so a str marker matches
    if self.live in web.text:
        print(Fore.GREEN, ' ->', self.version, '-',
              datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
              '- LIVE -', emails)
        self.save(emails)
    else:
        print(Fore.RED)
def main(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', help='the url', metavar='URL', required=True)
    args = parser.parse_args()
    url = args.url
    res = request.get(url + '/processlist')
    request.print_response(res)
def test_hello(self):
    app = App()

    @app.get('/hello/<name>')
    def hello(name: str, times: int = 1):
        return "hello " * times + name

    response = get(app, '/hello/klar', {"times": 2})
    assert response['body'] == "hello hello klar"
def download(url):
    resp = get(url)
    if resp.code == 200:
        # encode_type = resp.headers.get('Content-Type')
        # charset = re.findall(br'charset=(.+)', resp.read())
        # print(charset)
        html = resp.read()
        # print(html)
        parse(html, url)
def main():
    text = requests.get('https://adventofcode.com/2020/day/21/input').text
    inputs = [(set(ingredient[1].split()), set(ingredient[0].split()))
              for ingredient in [
                  line.split(' (contains ')
                  for line in text.strip().replace(')', '').replace(',', '').split('\n')
              ]]
    allergens, count = part_one(inputs)
    print('* Part One:', count)
    print('** Part Two:', part_two(allergens))
def test_response(self):
    app = App()

    @app.get('/')
    def test():
        return 404

    res = get(app, '/')
    assert res['status'] == '404 Not Found'
def test_resources(self):
    app = App()
    from resources import post
    app.resources(post, prefix='/v2')

    res = get(app, '/v2/post/31')
    assert res['status'] == '200 OK'
    assert res['body'] == 'post: 31'
def test_template_rendering(self):
    app = App()
    import templates.test

    @app.get('/test')
    def test(key) -> templates.test:
        return {"key": key}

    assert get(app, '/test', {"key": "foo"})['body'] == "key is foo\n"
def update(self):
    while True:
        data = ''
        urls = [
            "https://api.proxyscrape.com/?request=getproxies&proxytype=socks4&timeout=10000&ssl=yes"
        ]
        for url in urls:
            data += requests.get(url).text
        self.splited += data.split("\r\n")  # scrape a fresh batch of proxies
        time.sleep(600)
def handl(msg):
    chat_id = msg['chat']['id']
    command = msg['text']
    content_type, chat_type, chat_id = telepot.glance(msg)
    print(content_type, chat_type, chat_id, command)
    if command == '/time508':
        # 1) Perform query
        req = requests.get('https://www.zaragoza.es/sede/servicio/urbanismo-infraestructuras/transporte-urbano/poste-autobus/tuzsa-508?rf=html&srsname=wgs84')
    else:
        bot.sendMessage(chat_id, 'This command does not exist')
def envio_ID():
    tiempo = datetime.datetime.now()
    servidor = "192.168.X.X"  # your server here
    puerto = 80  # your server's port here
    url = "index?Date=" + str(tiempo) + "&clientID=" + str(ID)
    completo = 'http://' + servidor + "/" + url
    try:
        envio = requests.get(completo)
    except Exception as Error:
        pass
def test_routing(self):
    app = App()

    @app.get('/')
    def index():
        return 'index'

    @app.get('/foo')
    def foo():
        return 'foo'

    @app.route('/bar', methods=['get', 'post', 'put'])
    def bar(req):
        return '%s: %s' % (req.method, req.path)

    assert get(app, '/')['body'] == 'index'
    assert get(app, '/foo')['body'] == 'foo'
    assert get(app, '/baz')['status'].startswith('404')
    assert get(app, '/bar')['body'] == 'GET: /bar'
    assert post(app, '/bar')['body'] == 'POST: /bar'
def test_response_processing(self):
    app = App()

    def jsonp(req, res):
        callback = req.query.get('callback')
        if callback:
            res.body = "%s(%s)" % (callback, json.dumps(res.body))
            res.header("Content-Type", "application/javascript")

    @app.get('/')
    def handler() -> jsonp:
        return {'key': 'value'}

    res = get(app, '/', dict(callback='cb'))
    assert res['body'] == 'cb({"key": "value"})'
    assert res['headers'] == [("Content-Type", "application/javascript")]

    res = get(app, '/')
    assert res['status'] == '200 OK'
    assert res['body'] == '{"key": "value"}'
def test_etag(self):
    app = App()

    @app.get('/etag')
    def handler(req) -> etag:
        return "content"

    res = get(app, '/etag')
    assert res['body'] == 'content'
    assert res['status'] == '200 OK'

    headers = dict(res['headers'])
    res = get(app, '/etag', headers={"If-None-Match": headers['Etag']})
    assert res['status'].startswith('304')
    assert res['body'] == ''

    headers = dict(res['headers'])
    res = get(app, '/etag', headers={"If-None-Match": 'etag'})
    assert res['status'].startswith('200')
    assert res['body'] == 'content'
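# A minimal sketch of what the `etag` post-processor used above could look
# like, assuming the same (req, res) processor contract as the jsonp example
# in test_response_processing; `req.headers` and `res.status` are assumed
# names, and the framework's real implementation may differ.
import hashlib

def etag(req, res):
    tag = hashlib.md5(res.body.encode('utf-8')).hexdigest()
    res.header('Etag', tag)
    if req.headers.get('If-None-Match') == tag:
        res.status = 304
        res.body = ''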
def callRequest(method, url, parameters=None, data=None):
    choice = switchMethod(method)
    # requests expects query parameters and body via keywords,
    # not as bare positional arguments
    if choice == 2:
        return str(requests.post(url, params=parameters, data=data))
    elif choice == 3:
        return str(requests.put(url, params=parameters, data=data))
    elif choice == 4:
        return str(requests.patch(url, params=parameters, data=data))
    elif choice == 5:
        return str(requests.delete(url, params=parameters, data=data))
    else:
        return str(requests.get(url, params=parameters))
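# A plausible companion for switchMethod, which callRequest assumes maps
# HTTP verbs to the integer codes dispatched on above. This is a hypothetical
# helper; only the 2..5 mapping is implied by the dispatcher itself.
def switchMethod(method):
    return {'post': 2, 'put': 3, 'patch': 4, 'delete': 5}.get(method.lower(), 1)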
def dataStream(self, prevClose, openPrice, weekLo, weekHi, weekAvg, monthAvg, yearAvg):
    data = {
        prevClose: '',
        openPrice: '',
        weekLo: '',
        weekHi: '',
        weekAvg: '',
        monthAvg: '',
        yearAvg: '',
    }
    # `data.json` was an attribute lookup on a plain dict and would raise;
    # send the fields as query parameters instead
    stream = requests.get(alphavantageUrl, params=data)
    print(stream)
def test_resource_cls(self):
    app = App()

    @app.resource('/v1/post')
    class PostResource:
        def show(post_id):
            return 'post: %s' % post_id

        @method('patch')
        def like(post_id):
            return 'liked: %s' % post_id

    res = get(app, '/v1/post/31415')
    assert res['status'] == '200 OK'
    assert res['body'] == 'post: 31415'

    res = get(app, '/v1/post/3141/like')
    assert res['status'].startswith('404')

    res = patch(app, '/v1/post/3141/like')
    assert res['status'].startswith('200')
def __call__(self, image):
    # image = super(MonoImageFilter, self).__call__(image)
    try:
        if MonoImageFilter.regex.search(image.url):
            content = requests.get(image.url).content
            pic = BytesIO(content)  # image bytes, so BytesIO rather than StringIO
            raw_image = PIL.Image.open(pic)
            MonoImageFilter.check_color(raw_image)
            del raw_image  # more cleaning maybe
        logger.debug("Good image (%s): %s", clsn(self), image.url)
        return image
    except Exception as e:
        logger.debug("Bad image (%s): %s", clsn(e), image.url)
def __call__(self, image):
    # image = super(FormatImageFilter, self).__call__(image)
    try:
        if image.format.lower() == "gif":
            content = requests.get(image.url).content
            pic = BytesIO(content)  # image bytes, so BytesIO rather than StringIO
            raw_image = PIL.Image.open(pic)
            FormatImageFilter.check_animated(raw_image)
            del raw_image
        logger.debug("Good image (%s): %s", clsn(self), image.url)
        return image
    except Exception as e:
        logger.debug("Bad image (%s): %s", clsn(e), image.url)
def trade_spider(max_pages):
    page = 1
    while page < max_pages:
        url = "https://buckyroom.org/trade/search.php?page=" + str(page)
        source_code = requests.get(url)
        # all the page source is now stored in the source_code variable
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, 'html.parser')  # creating a BeautifulSoup object
        # go through the page source to find all the links for specific
        # articles to browse, grouped under a class
        for link in soup.findAll('a', {'class': 'item-name'}):  # getting all the titles
            href = "http://buckyroom.org/" + link.get('href')  # we want only the data in the href
            title = link.string  # .string is a property, not a callable
            # print(href)
            # print(title)
        page += 1
def simple_get(url):
    """
    Attempts to get the content at `url` by making an HTTP GET request.
    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None.
    """
    try:
        with closing(get(url, stream=True)) as resp:
            # honour the contract described in the docstring: only return
            # content for successful HTML/XML responses
            content_type = resp.headers.get('Content-Type', '').lower()
            if resp.status_code == 200 and ('html' in content_type or 'xml' in content_type):
                return resp.content
            return None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
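# Hypothetical usage of simple_get; the URL is illustrative only, and
# log_error is the same helper the function itself relies on.
raw_html = simple_get('https://example.com')
if raw_html is None:
    log_error('nothing retrieved from https://example.com')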
def __get_data(self, api, action):
    url = u'{0}{1}'.format(api, action)
    self.util.msg_log(u'api request: {0}'.format(url))
    # pyperclip.copy(url)
    # url = u'{0}{1}'.format(self.api, unicodedata.normalize('NFKD', action))
    try:
        url = self.util.remove_newline(url)
        response = requests.get(
            url,
            headers=self.ua_chrome,
            verify=False,
            proxies=self.settings.get_proxies()[1],
            timeout=self.settings.request_timeout
        )
    except requests.exceptions.ConnectTimeout as cte:
        # self.util.msg_log(u'{0}\n{1}\n\n\n{2}'.format(cte, dir(cte), cte))
        return False, self.util.tr(u'cc_connection_timeout').format(cte)
    except requests.exceptions.ConnectionError as ce:
        self.util.msg_log(u'ConnectionError:{0}'.format(ce))
        return False, ce
    except UnicodeEncodeError as uee:
        self.util.msg_log(u'msg:{0} enc:{1} args:{2} reason:{3}'.format(uee, uee.encoding, uee.args, uee.reason))
        return False, self.util.tr(u'cc_api_not_accessible')
    except:
        self.util.msg_log(u'Unexpected error during request: {0}'.format(sys.exc_info()[0]))
        return False, self.util.tr(u'cc_api_not_accessible')

    if response.status_code != 200:
        return False, self.util.tr(u'cc_server_fault')

    try:
        result = json.loads(response.text)
    except TypeError as te:
        self.util.msg_log(u'Unexpected error: {0}'.format(te))
        return False, self.util.tr(u'cc_api_not_accessible')
    except AttributeError as ae:
        self.util.msg_log(u'Unexpected error: {0}'.format(ae))
        return False, self.util.tr(u'cc_api_not_accessible')
    except:
        self.util.msg_log(u'Unexpected error: {0}'.format(sys.exc_info()[0]))
        return False, self.util.tr(u'cc_invalid_json')

    if result['success'] is False:
        return False, result['error']['message']

    return True, result['result']
def load_raw_rules(cls, url):
    "Load raw rules from url or package file."
    raw_rules = []
    filename = url.split('/')[-1]  # e.g.: easylist.txt
    try:
        with closing(requests.get(url, stream=True)) as file:
            file.raise_for_status()
            # lines = 0  # to be removed
            for rule in file.iter_lines():
                raw_rules.append(rule.strip())
                # lines += 1  # tbr
                # if lines == 2500: break  # tbr, only for windoze with no re2
            logger.info("Adblock online %s: %d", filename, len(raw_rules))
    except:  # file server down or bad url
        with open(resource_filename('summary', filename), 'r') as file:
            for rule in file:
                raw_rules.append(rule.strip())
        logger.info("Adblock offline %s: %d", filename, len(raw_rules))
    return raw_rules
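# Hypothetical usage; the class name is illustrative, and the EasyList URL
# matches the filename example in the function's own comment.
rules = AdblockFilter.load_raw_rules('https://easylist.to/easylist/easylist.txt')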
def __get_data(self, api, action):
    url = u"{0}{1}".format(api, action)
    self.util.msg_log(u"api request: {0}".format(url))
    # pyperclip.copy(url)
    # url = u'{0}{1}'.format(self.api, unicodedata.normalize('NFKD', action))
    try:
        # response = urllib2.urlopen(url)
        response = requests.get(
            url,
            verify=False,
            proxies=self.settings.get_proxies()[1],
            timeout=self.settings.request_timeout
        )
    except requests.exceptions.ConnectTimeout as cte:
        # self.util.msg_log(u'{0}\n{1}\n\n\n{2}'.format(cte, dir(cte), cte))
        return False, self.util.tr(u"cc_connection_timeout").format(cte)
    except UnicodeEncodeError as uee:
        self.util.msg_log(
            u"msg:{0} enc:{1} args:{2} reason:{3}".format(uee, uee.encoding, uee.args, uee.reason)
        )
        return False, self.util.tr(u"cc_api_not_accessible")
    except:
        self.util.msg_log(u"Unexpected error during request: {0}".format(sys.exc_info()[0]))
        return False, self.util.tr(u"cc_api_not_accessible")

    if response.status_code != 200:
        return False, self.util.tr(u"cc_server_fault")

    try:
        result = json.loads(response.text)
    except TypeError as te:
        self.util.msg_log(u"Unexpected error: {0}".format(te))
        return False, self.util.tr(u"cc_api_not_accessible")
    except AttributeError as ae:
        self.util.msg_log(u"Unexpected error: {0}".format(ae))
        return False, self.util.tr(u"cc_api_not_accessible")
    except:
        self.util.msg_log(u"Unexpected error: {0}".format(sys.exc_info()[0]))
        return False, self.util.tr(u"cc_invalid_json")

    if result["success"] is False:
        return False, result["error"]["message"]

    return True, result["result"]
def getAllRoundsForUser(userID, page):
    r = requests.get(base_url + 'rounds/user/' + str(userID) + '/' + str(page),
                     auth=('cse3213', 'test'))
    bottle.response.status = r.status_code
    return r.json()  # call the method; bare r.json would return the bound method
__author__ = 'Ben'

import requests

r = requests.get("http://localhost:82/cgi-bin/printenv.py")
print(r.text)
import os.path
import time
import json
import subprocess

import requests

request_path = "./route_request_sms"

if __name__ == "__main__":
    while True:
        while not os.path.exists(request_path):
            time.sleep(1)
        for sms_request in os.listdir(request_path):
            with open(os.path.join(request_path, sms_request)) as data_file:
                data = json.load(data_file)
            origin = data["from"]
            destination = data["to"]
            # "htttp" was a typo, and json.loads needs the response body,
            # not the Response object itself
            response = requests.get("http://localhost:3002/api/recommendation/" + origin + "/" + destination).json()
            subprocess.call(["php", "-f", "send_sms.php", data["number"], response["bestRoute"]])
def getAllUsers():
    r = requests.get(base_url + 'users/', auth=('cse3213', 'test'))
    bottle.response.status = r.status_code
    return r.json()
def getAllRounds(page):
    r = requests.get(base_url + 'rounds/all/' + str(page), auth=('cse3213', 'test'))
    bottle.response.status = r.status_code
    return r.json()
def download_resource(self, url, resource_format, dest_file, delete):
    try:
        # if resource_format is not None:
        #     if resource_format.lower() == 'georss':
        #         dest_file += '.xml'
        if delete is True:
            os.remove(dest_file)
        # name, hdrs = urllib.urlretrieve(url, dest_file)
        response = requests.get(
            url,
            verify=False,
            stream=True,
            proxies=self.settings.get_proxies()[1],
            timeout=self.settings.request_timeout,
        )
        if not response.ok:
            return False, self.util.tr(u"cc_download_error").format(response.reason), None

        # TODO remove after testing
        # doesn't work: headers is an object of type 'requests.structures.CaseInsensitiveDict'
        # self.util.msg_log(u'{0}'.format(json.dumps(response.headers, indent=2, sort_keys=True)))
        for k, v in response.headers.items():
            self.util.msg_log(u"['{0}']: \t{1}".format(k, v))

        # Content-Disposition:
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html
        # http://www.iana.org/assignments/cont-disp/cont-disp.xhtml
        file_name_from_service = self.__file_name_from_service(
            url, response.headers.get("content-disposition"), response.headers.get("content-type")
        )
        self.util.msg_log(u"file name from service: {0}".format(file_name_from_service))
        if file_name_from_service:
            # set new dest_file name
            dest_file = os.path.join(os.path.dirname(dest_file), file_name_from_service)

        # hack for WFS/WM(T)S services that don't specify the format as wms, wmts or wfs
        url_low = url.lower()
        if "wfs" in url_low and "getcapabilities" in url_low and not dest_file.endswith(".wfs"):
            dest_file += ".wfs"
        if "wmts" in url_low and "getcapabilities" in url_low and not dest_file.endswith(".wmts"):
            dest_file += ".wmts"
        # we use extension wmts for wms too
        if "wms" in url_low and "getcapabilities" in url_low and not dest_file.endswith(".wmts"):
            dest_file += ".wmts"

        # if file name has been set from service, set again after above changes for wfs/wm(t)s
        if file_name_from_service:
            # set return value to full path
            file_name_from_service = dest_file

        # chunk_size = 1024
        chunk_size = None
        # http://docs.python-requests.org/en/latest/user/advanced/#chunk-encoded-requests
        if self.__is_chunked(response.headers.get("transfer-encoding")):
            self.util.msg_log("response is chunked")
            chunk_size = None

        with open(dest_file, "wb") as handle:
            for chunk in response.iter_content(chunk_size):
                if chunk:
                    handle.write(chunk)

        return True, "", file_name_from_service
    except requests.exceptions.ConnectTimeout as cte:
        # keep the three-element return shape used by the other branches
        return False, self.util.tr(u"cc_connection_timeout").format(cte), None
    except IOError as e:
        self.util.msg_log("Can't retrieve {0} to {1}: {2}".format(url, dest_file, e))
        return False, self.util.tr(u"cc_download_error").format(e.strerror), None
def getRound(id):
    r = requests.get(base_url + 'rounds/' + str(id), auth=('cse3213', 'test'))
    bottle.response.status = r.status_code
    return r.json()
def get_from_odl(url, user, pw):
    # requests.get takes credentials via the auth keyword, not positionally
    return requests.get(url, auth=(user, pw))