def url_size(url, faker=False, headers={}):
    if faker:
        response = urlopen_with_retry(
            request.Request(url, headers=fake_headers))
    elif headers:
        response = urlopen_with_retry(request.Request(url, headers=headers))
    else:
        response = urlopen_with_retry(url)

    size = response.headers['content-length']
    return int(size) if size is not None else float('inf')
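# url_size above relies on an urlopen_with_retry helper that is not shown in this
# collection. A minimal sketch of what such a wrapper might look like, assuming
# request is urllib.request (as the call pattern suggests), a fixed retry count,
# and urllib.error.URLError as the failure mode (all assumptions):
from urllib import request, error
import time

def urlopen_with_retry(*args, max_retries=3, **kwargs):
    for attempt in range(max_retries):
        try:
            return request.urlopen(*args, **kwargs)
        except error.URLError:
            if attempt == max_retries - 1:
                raise
            time.sleep(0.5 * (attempt + 1))  # simple linear back-off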
def check_for_new_species(self, node):
    """
    Checks for new entries in the VAMDC database node which are not
    available in the local sqlite3 database.

    :ivar nodes.Node node: VAMDC database node which will be checked for updates
    """
    counter = 0
    cursor = self.conn.cursor()
    request = r.Request(node=node)
    result = request.getspecies()
    for id in result.data['Molecules']:
        try:
            cursor.execute(
                "SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, PF_Timestamp "
                "FROM Partitionfunctions WHERE PF_SpeciesID=?", [id])
            exist = cursor.fetchone()
            if exist is None:
                print("ID: %s" % result.data['Molecules'][id])
                counter += 1
        except Exception as e:
            print(e)
            print(id)
    print("There are %d new species available" % counter)
def login():
    login_url = "https://security.kaixin001.com/login/login_post.php"
    data = {
        "email": "13119144223",
        "password": "******"
    }
    # URL-encode the POST payload
    data = parse.urlencode(data)
    # HTTP request headers
    headers = {
        "Content-Length": len(data),
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36"
    }
    # Build the Request object; data must be a bytes object, so encode it
    req = request.Request(login_url, data=data.encode(), headers=headers)
    rsp = opener.open(req)
    html = rsp.read()
    html = html.decode()
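# login() above posts through a module-level opener that is not shown in this
# collection. A common setup, and only an assumption about the original, is a
# cookie-aware opener built from http.cookiejar so the session survives the login:
from urllib import request, parse
from http import cookiejar

cookie_jar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))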
def __upload_image__(self, data):
    self.lock.acquire()
    req = request.Request(req=data)
    total = req.args['total']
    imagedata = b""  # recv() returns bytes, so accumulate into a bytes buffer
    inp = self.conn.recv(10240)
    while inp:
        imagedata += inp
        if len(imagedata) >= total:
            break
        else:
            got = len(imagedata)
            remain = total - got
            if remain < 10240:
                inp = self.conn.recv(remain)
            else:
                inp = self.conn.recv(10240)
    image_binary = base64.b64decode(imagedata)
    with Image(blob=image_binary) as img:
        for c in self.app.design.cmps:
            self.app.callMethod(c.id, c.method, img)
        output = BytesIO()
        img.save(file=output)
    # output.read().encode('base64')
    imageStr = base64.b64encode(output.getvalue())
    total = len(imageStr)
    self.conn.send(json.dumps({"total": total}).encode())  # sockets expect bytes
    self.conn.sendall(imageStr)
    self.lock.release()
def download_page(url):
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:  # If the current version of Python is 3.0 or above
        import urllib.request  # urllib library for extracting web pages
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:  # If the current version of Python is 2.x
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
def wikiscrape():
    req = urr.Request('https://en.wikipedia.org/wiki/List_of_data_breaches')
    content = urr.urlopen(req)
    cs = content.info().get_content_charset()  # get charset of webpage
    html = content.read().decode(cs)  # consists of html source code
    print("Downloading...")
    soup = BeautifulSoup(html, 'html.parser')
    data = []
    tabclasses = []
    tables = soup.findAll("table")  # "class" : "wikitable"
    for index, tab in enumerate(tables):
        data.append([])
        tabclasses.append(tab.attrs)
        for ind, items in enumerate(tab.find_all("tr")):
            cols = items.find_all(["th", "td"])
            cols = [ele.text.strip() for ele in cols]
            data[index].append([ele for ele in cols if (ele != [])])
    df = pd.DataFrame()
    for rows in data:
        # Note: DataFrame.append was removed in pandas 2.0; pd.concat is the
        # modern equivalent of this pattern.
        df = df.append(rows)
    df = df.replace(r'\n', '', regex=True)
    print('N dimensions of data {}'.format(df.shape))
    print('Function call complete')
    return df
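# wikiscrape() above assumes these module-level imports; the names are inferred
# from the calls it makes (the urr alias in particular is an assumption):
import urllib.request as urr
import pandas as pd
from bs4 import BeautifulSoup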
def __init__(self, league_id, year, season_length, espn_s2=None, swid=None):
    self.request = request.Request(league_id, year)
    self.season_length = season_length
    self.league_id = league_id
    self.year = year
    self.teams = {}
    self.espn_s2 = espn_s2
    self.swid = swid
    self.season_bench_points = {}
    self.avg_player_scores = {
        'RB': 0,
        'WR': 0,
        'QB': 0,
        'TE': 0,
        'K': 0,
        'D/ST': 0
    }
    self._fetch_league()
def get_content(url, headers={}, decoded=True):
    """Gets the content of a URL by sending an HTTP GET request.

    Args:
        url: A URL.
        headers: Request headers used by the client.
        decoded: Whether to decode the response body using UTF-8 or the
            charset specified in Content-Type.

    Returns:
        The content as a string.
    """
    req = request.Request(url, headers=headers)
    if cookies:
        cookies.add_cookie_header(req)
        req.headers.update(req.unredirected_hdrs)

    response = urlopen_with_retry(req)
    data = response.read()

    # Handle HTTP compression for gzip and deflate (zlib)
    content_encoding = response.getheader('Content-Encoding')
    if content_encoding == 'gzip':
        data = ungzip(data)
    elif content_encoding == 'deflate':
        data = undeflate(data)

    # Decode the response body
    if decoded:
        charset = match1(
            response.getheader('Content-Type', ''), r'charset=([\w-]+)')
        if charset is not None:
            data = data.decode(charset, 'ignore')
        else:
            data = data.decode('utf-8', 'ignore')
    return data
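# get_content above leans on three helpers that are not defined in this
# collection: ungzip, undeflate, and match1. Minimal standard-library sketches,
# assuming the originals behave the same way:
import gzip
import io
import re
import zlib

def ungzip(data):
    # Decompress a gzip-encoded response body.
    with gzip.GzipFile(fileobj=io.BytesIO(data)) as f:
        return f.read()

def undeflate(data):
    # Decompress a raw-deflate body; negative wbits means "no zlib header".
    return zlib.decompress(data, -zlib.MAX_WBITS)

def match1(text, pattern):
    # Return the first capture group of the first match, or None.
    m = re.search(pattern, text)
    return m.group(1) if m else None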
def __init__(self, config):
    self.username, self.password, self.host, self.user_agent = config
    self.request = request.Request(config)
    self.anime = None
    self.manga = None
def test_requests(request_classifier):
    """A range of tests to try with our classifier.

    Parameters
    ----------
    request_classifier : object
        Our request classifier to work with.
    """
    requests = []
    requests.append((request.Request([
        '195.154.169.9', '-', '-',
        datetime(2016, 4, 10, 4, 46, 40, tzinfo=pytz.utc),
        'GET', '/', '200', '42751', '-',
        'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'
    ]), 'OK'))
    print('')
    request_number = 0
    for request_item in requests:
        print('REQUEST #' + str(request_number) + ' ~')
        request_classifier.try_classify_request(request_item)
        request_number += 1
    print('')
def get_edited_draft(self, media):
    self.update_from_server()
    if self.edited_drafts[media]:
        draft = request.Request(
            request_base_id=self.edited_drafts[media][0])
    else:
        draft = None
    return draft
def fetch(self):
    while True:
        try:
            _, job, handle, args = self.jobs.get()
            response = request.Request(url=job).response.content
            handle(response, args=args)
        finally:
            self.jobs.task_done()
def getRequestToken(self):
    req = request.Request(self.api.getRequestTokenVerb(),
                          self.api.getRequestTokenEndpoint())
    tok = token.Token("", "")
    req.addOAuthParam(oauth.CALLBACK, oauth.OUT_OF_BAND)
    self.addOAuthParams(req, tok)
    self.appendSignatur(req)
def run(self):
    while True:
        yield Simulation.hold, self,
        req = request.Request(self.numRequest)
        self.numRequest += 1
        self.client.sendRequest(req, self.server)
        yield Simulation.hold, self, numpy.random.poisson(1.0)
def send_request(req_body):
    global requests_to_send
    req = request.Request(req_body)
    requests_to_send.put(req)
    while req.waiting:
        time.sleep(0.1)
    #print("about to return from send_request")
    return req.response_obj
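# send_request above enqueues a Request on a global queue and busy-waits until a
# background sender clears req.waiting. That sender loop is not part of this
# collection; a hedged sketch of what it might look like (Request.send and the
# attribute names below are assumptions, not the original API):
def sender_loop():
    while True:
        req = requests_to_send.get()
        req.response_obj = req.send()  # assumed: the Request knows how to send itself
        req.waiting = False
        requests_to_send.task_done()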
def make_args(self, action, route=[], body=[]):
    result = self.check_args(action, route, body)
    if not result:
        return None
    final_route = self._base + self.get_route(action, route)
    data = self.get_data(action, body)
    final_body = self.make_body(data)
    req = request.Request(final_route, action["method"], final_body)
    return req
def spam_form(url, times):
    r = request.Request()
    try:
        r.call(url, times)
    except Exception as err:
        print(bcolors.colors.FAIL + str(err) + bcolors.colors.ENDC)
        usage()
        sys.exit(2)
def check_for_updates(self, node):
    """
    Checks for each database entry if an update for the molecular or atomic
    specie is available in the specified VAMDC database node.

    :ivar nodes.Node node: VAMDC database node which will be checked for updates
    """
    count_updates = 0
    counter = 0
    #species_list = []
    cursor = self.conn.cursor()
    cursor.execute("SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, "
                   "datetime(PF_Timestamp) FROM Partitionfunctions ")
    rows = cursor.fetchall()
    num_rows = len(rows)
    query = q.Query()
    request = r.Request()

    for row in rows:
        counter += 1
        print("%5d/%5d: Check specie %-55s (%-15s): "
              % (counter, num_rows, row[0], row[1]), end=' ')
        #id = row[1]
        vamdcspeciesid = row[2]
        # query_string = "SELECT ALL WHERE VAMDCSpeciesID='%s'" % vamdcspeciesid
        query_string = "SELECT ALL WHERE SpeciesID=%s" % row[1][6:]
        request.setquery(query_string)
        request.setnode(node)

        try:
            changedate = request.getlastmodified()
        except r.TimeOutError:
            print("TIMEOUT")
            continue
        except r.NoContentError:
            print("ENTRY OUTDATED")
            changedate = None
            continue
        except Exception as e:
            print("Error in getlastmodified: %s " % str(e))
            print("Status - code: %s" % str(request.status))
            changedate = None
            continue

        tstamp = parser.parse(row[3] + " GMT")
        if changedate is None:
            print(" -- UNKNOWN (Could not retrieve information)")
            continue
        if tstamp < changedate:
            print(" -- UPDATE AVAILABLE ")
            count_updates += 1
        else:
            print(" -- up to date")

    if count_updates == 0:
        print("\r No updates for your entries available")
    print("Done")
def request_1(self):
    method = "POST"
    path = "/install.php?finish"
    # The '{{referer}}' / '{{payload}}' template placeholders are filled in
    # with the instance values before the request is built.
    headers = '''{{'Referer': '{}'}}'''.format(self.referer)
    data = "__typecho_config={}".format(self.payload)
    connection = request.Request(self.url, method, {}, headers, path, data)
    responses = connection.request()
    print(responses)
def raw(did, cid, *data, **kwargs):
    req = request.Request(s.seq, *data)
    req.did = did
    req.cid = cid
    if 'fmt' in kwargs:
        req.fmt = kwargs['fmt']
    res = s.write(req)
    print 'request: %s' % repr(req.bytes)
    print 'response: %s' % repr(res.data)
    return res
def get_course_list_div(self, link):
    referer_agent_response = request.Request(
        link).get_response_using_referer()
    if referer_agent_response['referer'] is not None:
        soup = BeautifulSoup(referer_agent_response['referer'].content,
                             features="html.parser")
        course_list_div = soup.find(
            'div', {
                'class': 'response-results courses-listing1 clearfix'
            }).find_all('div', {'class': 'wrap_post_course'})
        return course_list_div
def get_access_token():
    url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=ZrjLfF5Rh7pOL66gaOmDGnXn&client_secret=16bac9645093ca2632ebb81015ff7544"
    req = request.Request(url, method="POST")
    resp = request.urlopen(req)
    data = resp.read().decode('utf-8')
    json_data = json.loads(data)
    global bda_access_token
    bda_access_token = json_data['access_token']
    return bda_access_token
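# Hypothetical follow-up to get_access_token: Baidu APIs conventionally take the
# token as an access_token query parameter on later calls. The helper below and
# that convention are assumptions, not part of the original:
from urllib import parse

def with_access_token(api_url):
    # Append the cached token as a query parameter.
    sep = '&' if parse.urlparse(api_url).query else '?'
    return api_url + sep + parse.urlencode({'access_token': bda_access_token})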
def handleHttpReq(self, method, **param):
    mParam = copy.deepcopy(param)
    try:
        req = request.Request(method, mParam)
        PublicCloud = param[u'name']
        q = self.__cloud_dict[PublicCloud]
        q.put(req)
        res = req.getResponse()
        return res
    except KeyError:
        res = dict(result=1, error_code=errors.UNSUPPORTED_CLOUD)
        return json.dumps(res)
def get_conversation(self, target_id):
    resp = None
    try:
        self._open_socket()
        self._socket.connect(self._address)
        content = {
            'requester': str(self._id),
        }
        if self._privkey is None:
            req = request.Request(RequestType.RECEIVE_PRIVKEY,
                                  json.dumps(content))
            self._socket.send(str(req).encode())
            self._privkey = self.get_response()
        content['interlocutor'] = str(target_id)
        if self._target_pubkey is None:
            req = request.Request(RequestType.RECEIVE_PUBKEY,
                                  json.dumps(content))
            self._socket.send(str(req).encode())
            self._target_pubkey = self.get_response()
        req = request.Request(RequestType.RECEIVE_MSGS, json.dumps(content))
        self._socket.send(str(req).encode())
        resp = self.get_response()
    except socket.error as err:
        print('Error {}: {}'.format(err.errno, err.strerror))
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        self._close_socket()
    if resp is None or self._privkey is None:
        return None
    conversation = sorted(json.loads(resp),
                          key=lambda c: c['timestamp'], reverse=False)
    for m in conversation:
        m['message'] = mcrypto.decrypt(m['message'], self._privkey)
    return conversation
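# get_conversation above depends on a RequestType enum from the same project. A
# minimal sketch consistent with the member names used there (the member values
# are assumptions):
from enum import Enum

class RequestType(Enum):
    RECEIVE_PRIVKEY = 'receive_privkey'
    RECEIVE_PUBKEY = 'receive_pubkey'
    RECEIVE_MSGS = 'receive_msgs'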
def simulateOneServer(file):
    """Simulates one server processing a list of network requests

    Args:
        file (web response): CSV web response retrieved from argument

    Attributes:
        readCSV (csv object): Iterable CSV object
        web_server (Server instance): Instance of Server class
        server_queue (Queue instance): Instance of Queue class
        waiting_times (list): List of wait times

    Returns:
        Prints the average wait time of the requests, e.g.
        "Average Wait 2477.81 secs 4975 requests remaining."

    Examples:
    """
    readCSV = csv.reader(file)
    web_server = server.Server()
    server_queue = queue.Queue()
    waiting_times = []
    for row in readCSV:
        # The time when a request came in
        arrival_time = int(row[0])
        # The amount of time needed to process the request
        process_amount = int(row[2])
        # Create a new task for the arrival time
        server_request = request.Request(arrival_time)
        # Add it to the queue
        server_queue.enqueue(server_request)
        # If the server is not busy and a task is waiting
        if (not web_server.busy()) and (not server_queue.is_empty()):
            # Remove the next task from the queue
            next_request = server_queue.dequeue()
            # Record its wait time: the current counter minus the arrival time
            waiting_times.append(next_request.wait_time(arrival_time))
            # Assign the task to the server; the second argument is the time
            # remaining, i.e. column 3 of the CSV
            web_server.start_next(next_request, process_amount)
        # Count down the remaining time set by start_next(process_amount)
        web_server.tick()
    average_wait = float(sum(waiting_times)) / len(waiting_times)
    print("Average Wait %6.2f secs %3d requests remaining."
          % (average_wait, server_queue.size()))
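# A minimal, self-contained demo of simulateOneServer using an in-memory CSV.
# The column layout (arrival time, name, processing time) matches the indices
# read above; the sample values are made up for illustration:
import io

sample_csv = io.StringIO("0,/index.html,3\n2,/about.html,5\n4,/contact.html,1\n")
simulateOneServer(sample_csv)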
def test_fail_on_invalid_params(self):
    ''' Should fail on various errors '''
    p1 = request_param.RequestParam(
        'latitude',
        unit='lat degrees',
        description='event latitude',
        validators=[validators.ValidatorNumberRange(20, 30)])
    p2 = request_param.RequestParam(
        'longitude',
        unit='longitude degrees',
        description='event longitude',
        validators=[validators.ValidatorNumberRange(40, 60)])
    rq = request.Request(parameters=[p1, p2])

    rq_params = StubRequestArgumentBag({'latitude': 'w0', 'longitude': 70})
    rq.bind(rq_params)
    rq.validate()
    nt.eq_(rq.is_valid, False)
    nt.eq_(rq.errors[0][1].message,
           'Supplied parameter <w0> is not a valid float value')
    nt.eq_(rq.errors[1][1].message,
           'Supplied parameter <70.0> is greater than maximum value of 60')

    rq_params = StubRequestArgumentBag({'latitude': 25, 'longitude': 50})
    rq.bind(rq_params)
    rq.validate()
    nt.eq_(rq.is_valid, True)

    def pv1(request_object):
        ''' silly validator raises if lat is smaller than lon '''
        lat = request_object.getParam('latitude')
        lon = request_object.getParam('longitude')
        if lat.value < lon.value:
            raise validators.ValidatorGlobalError(
                'lat <{}> is smaller than lon <{}>'.format(
                    lat.value, lon.value))

    rq.addPostValidator(pv1)
    rq.validate()
    nt.eq_(rq.is_valid, False)
    nt.eq_(rq.global_errors[0].message,
           'lat <25.0> is smaller than lon <50.0>')
def callback(ch, method, properties, body):
    print " [*] WORKER: INFO -- Starting worker"
    print " [*] WORKER: INFO -- send body to sitewhere"
    print " [*] BODY: %s" % (body)
    devtoken = getDevToken(body)
    assignment = getDeviceSpecs(devtoken)
    # prepare header
    header = {
        "Authorization": "Basic YWRtaW46cGFzc3dvcmQ=",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en-US,en;q=0.8",
        "X-SiteWhere-Tenant": "sitewhere1234567890",
        "Content-Type": "application/json",
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "X-Requested-With": "XMLHttpRequest",
        "Connection": "keep-alive"
    }
    config = conf.configLoader(
        '%s/config.conf' % (os.path.dirname(os.path.abspath(__file__))))
    ip_address = config['SITEWHERE']['ipaddr']
    commandtoken = config['SITEWHERE']['commandtoken']
    print commandtoken, devtoken, assignment
    if assignment:
        sender = request.Request(
            'http://%s:5000/sitewhere/api/assignments/%s/invocations'
            % (ip_address, assignment))
        data = {
            "initiator": "REST",
            "initiatorId": "admin",
            "target": "Assignment",
            "commandToken": commandtoken,
            "status": "Pending",
            "metadata": {},
            "parameterValues": {
                "greeting": devtoken,
                "loud": "false"
            }
        }
        data = json.dumps(data)
        print " [*] WORKER: INFO -- send data to sitewhere"
        res = sender.sendPostRequest(header, data)
        print res
    ch.basic_ack(delivery_tag=method.delivery_tag)
def test(target_url):
    import requests
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    r = requests.get(target_url, allow_redirects=False, headers=headers)
    logging.info('%s %s %d %s' % (r.status_code, r.url, len(r.text), r.headers))
    mydc = DC(".")
    myreq = request.Request()
    myreq.http_code, myreq.headers, myreq.body = r.status_code, r.headers, r.content
    myreq.target_url, myreq.cur_url = target_url, r.url
    resp = mydc.judge(myreq)
    logging.info(resp)
def get_courses(self):
    referer_agent_response = request.Request(
        self.url).get_response_using_referer()
    if referer_agent_response['referer'] is not None:
        link = self.get_all_course_link(referer_agent_response['referer'])
        course_categorys = self.get_course_category(link)
        if course_categorys:
            root_path = self.create_directory()
            for category in course_categorys:
                if category:
                    all_courses_by_category, category_path = self.get_all_course_by_cateory(
                        category['href'], category['category_name'], root_path)
                    self.save_course_details_file(category_path,
                                                  all_courses_by_category)
def payTheBucks(self):
    # Send the payment request to the server.
    # Build the request from the machine's MAC address.
    mac = str(':'.join(("%012X" % get_mac())[i:i + 2] for i in range(0, 12, 2)))
    req = request.Request({"r": "IfUserPayed", "MAC": mac})
    # POST the request to the server
    resp = req.CustomPostRequest(req.getData(), req.getUrl(), req.getHeaders())
    print "IfUserPayed"
    print resp
    if os.path.isfile("check.check"):
        os.remove("check.check")
    # Italian for: "GREAT CHOICE! CLOSE AND RESTART THIS APPLICATION"
    self.mess["text"] = "OTTIMA SCELTA! CHIUDI E RIAVVIA QUESTA APPLICAZIONE"