def test_session_persistent_params(self):
    """Session-level params persist across requests and merge with per-request params."""
    base = {'a': 'a_test'}
    s = Session()
    s.params = base

    # First request: the session-level param alone must appear in the body.
    r1 = s.get(httpbin('get'))
    assert base['a'] in r1.content

    # Second request: per-request params are merged on top of session params.
    extra = {'b': 'b_test'}
    r2 = s.get(httpbin('get'), params=extra)
    assert base['a'] in r2.content
    assert extra['b'] in r2.content

    # Third request: a value of None removes the session param from the merge.
    override = {'b': 'b_test', 'a': None, 'c': 'c_test'}
    r3 = s.get(httpbin('get'), params=override)
    assert base['a'] not in r3.content
    assert override['b'] in r3.content
    assert override['c'] in r3.content
def __init__(self, test, test_result, dns_resolve=True):
    """Session bound to a test run.

    :param test: the test being executed
    :param test_result: collector that receives per-request measurements
    :param dns_resolve: whether URLs should be DNS-resolved before sending
    """
    _Session.__init__(self)
    # TLS verification can be switched off via the BYPASS_SSL_CHECK env var.
    self.verify = not os.getenv('BYPASS_SSL_CHECK')
    self.test = test
    self.test_result = test_result
    # Placeholder 4-tuple until the first load reports its status.
    self.loads_status = (None,) * 4
    self.dns_resolve = dns_resolve
def curl(self, method, endpoint, params=None):
    """Send *method* to *endpoint* with the API key header and wrap the result.

    :param method: HTTP verb, e.g. 'GET'
    :param endpoint: path portion appended to the configured scheme/host
    :param params: optional query parameters
    :return: GforceResponse wrapping the raw response
    """
    url = '{scheme}://{host}{endpoint}'.format(
        scheme=self.SCHEME, host=self.HOST, endpoint=endpoint)
    prepared = Request(method, url, params=params or {}).prepare()
    # The application key is attached after preparation so it is always present.
    prepared.headers.update({
        'X-Application-Key': self.KEY,
    })
    return GforceResponse(Session().send(prepared))
def test_session_persistent_headers(self):
    """Headers set on the session are sent on every request it makes."""
    ua_headers = {'User-agent': 'Mozilla/5.0'}
    s = Session()
    s.headers = ua_headers

    # Two consecutive requests must both carry the session header.
    r1 = s.get(httpbin('user-agent'))
    assert ua_headers['User-agent'] in r1.content
    r2 = s.get(httpbin('user-agent'))
    assert ua_headers['User-agent'] in r2.content
    self.assertEqual(r2.status_code, 200)
class MeiPai:
    """Scraper for meipai.com: search videos/topics and list videos for a topic."""

    def __init__(self):
        # One shared session so cookies and the module-level headers persist.
        self.session = Session()
        self.session.headers = headers

    def search(self, query, search_type='mv', page=1):
        """Search meipai.com.

        :param query: search text; a leading '#' means "treat as a topic name"
        :param search_type: 'mv' (videos) or 'topic'
        :param page: 1-based result page
        :return: list of video URLs ('mv'), topic names ('topic'), or []
        """
        query = query.strip()
        # topic
        if query.startswith('#'):
            topic = query.strip('#')
            return self.get_videos_by_topic(topic)
        # Serve from cache when an identical search was done before.
        cache_exists, result = check_cache_and_return_result(query=query, search_type=search_type, page=page)
        if cache_exists:
            return result
        url = 'http://www.meipai.com/search/{search_type}?'.format(search_type=search_type) + \
            urlencode({'q': query, 'page': page})
        resp = self.session.request('GET', url)
        html = BeautifulSoup(resp.content, 'html.parser')
        if search_type == 'mv':
            # Each result card stores the playable URL in its data-video attribute.
            video_links = [div.attrs['data-video'].strip() for div in html.find_all(class_='content-l-video')]
            # associated_words = self.word_association(query)
            # print("你是否还想搜索:" + ",".join(associated_words))
            result = video_links
        elif search_type == 'topic':
            result = [div.text.strip().strip('#') for div in html.find_all(class_='tcard-name')]
        else:
            result = []
        cache_search_result(query, search_type, page, result)
        return result

    def get_videos_by_topic(self, topic_name):
        """
        Get the top videos for a topic.

        :param topic_name: topic name without the leading '#'
        :return: list of video URLs from the topic's hot timeline
        """
        topic = Topic(topic_name)
        topic_id = topic.topic_id
        url = "http://www.meipai.com/topics/hot_timeline?page=1&count=24&tid={topic_id}".format(topic_id=topic_id)
        resp = self.session.request('GET', url)
        result = json.loads(resp.text)
        return [media['video'] for media in result['medias']]

    # get associated words (search suggestions) for the given word
    def word_association(self, word):
        url = 'http://www.meipai.com/search/word_assoc?' + urlencode({'q': word})
        resp = self.session.request('GET', url)
        return json.loads(resp.text)
class EMDRUploader(Thread):
    """Daemon thread that drains a queue of market orders and uploads them to EMDR."""

    def __init__(self, statsCollector):
        Thread.__init__(self)
        self._queue = Queue()
        self.setDaemon(True)
        # One session reused for all uploads; identifies this trawler to EMDR.
        self._session = Session()
        self._session.headers.update({
            "User-Agent": "CRESTMarketTrawler/{0} ([email protected])".format(VERSION)
        })
        # Pool bounds the number of concurrent submissions.
        self._pool = Pool(size=10)
        self.statsCollector = statsCollector

    def notify(self, regionID, typeID, orders):
        """Queue one (region, type) batch of orders for upload, recording stats."""
        self._queue.put((timestampString(), regionID, typeID, orders))
        self.statsCollector.tally("emdr_send_queued")
        queueSize = self._queue.qsize()
        self.statsCollector.datapoint("emdr_queue_size", queueSize)
        # Escalating log levels as the backlog grows.
        if queueSize > 100:
            logger.error("EMDR submit queue is about {0} items long!".format(queueSize))
        elif queueSize > 10:
            logger.warn("EMDR submit queue is about {0} items long!".format(queueSize))

    def run(self):
        """Forever: take a queued batch and spawn a pooled upload for it."""
        def submit(generationTime, regionID, typeID, orders):
            # Serialize to the EMDR UUDIF format and POST it.
            uudif = json.dumps(EMDROrdersAdapter(generationTime, regionID, typeID, orders))
            res = self._session.post("http://upload.eve-emdr.com/upload/", data=uudif)
            self.statsCollector.tally("emdr_sent")
            if res.status_code != 200:
                logger.error("Error {0} submitting to EMDR: {1}".format(res.status_code, res.content))
                self.statsCollector.tally("emdr_errored")
        while True:
            # Blocking get; spawn returns immediately, the pool throttles to 10.
            (generationTime, regionID, typeID, orders) = self._queue.get()
            self._pool.spawn(submit, generationTime, regionID, typeID, orders)
def fetch_data(dl_type=None, **kwargs):
    """
    Fetch Receipts, Expenditures, Committees, or Candidates.

    dl_type is one of those four choices. kwargs depend on the choice.
    Receipts and Expenditures need start_date and end_date for the search.
    Committees need a name_start kwarg to pass into the search.
    Candidates need an election_id kwarg.
    Seems like the maximum that you can get is about 250,000 records at a time.

    :return: the downloaded page body, or None when the site reports an error
    :raises ValueError: for an unrecognized dl_type (previously this fell
        through and crashed with UnboundLocalError on `url`)
    """
    s = Session()
    post_data = {
        '__EVENTTARGET': 'ctl00$ContentPlaceHolder1$btnText',
        'ctl00$pnlMenu_CollapsiblePanelExtender_ClientState': 'true',
        'ctl00$AccordionStateBoardMenu_AccordionExtender_ClientState': '0',
        'ctl00$mtbSearch': '',
        'ctl00$AccordionPaneStateBoardMenu_content$AccordionMainContent_AccordionExtender_ClientState': '-1',
        'hiddenInputToUpdateATBuffer_CommonToolkitScripts': '1',
        '__EVENTARGUMENT': '',
        '__VIEWSTATEGENERATOR': 'E8D1F59A'
    }
    # NOTE: these branches mutate the module-level *_GET_PARAMS dicts in place,
    # so the dates/name persist across calls — preserved behavior.
    if dl_type == 'Receipts':
        CONT_GET_PARAMS['RcvDate'] = kwargs['start_date']
        CONT_GET_PARAMS['RcvDateThru'] = kwargs['end_date']
        url = '%s/DownloadList.aspx?%s' % (BASE_URL, urlencode(CONT_GET_PARAMS))
    elif dl_type == 'Committees':
        COMM_GET_PARAMS['Name'] = kwargs['name_start']
        url = '%s/DownloadList.aspx?%s' % (BASE_URL, urlencode(COMM_GET_PARAMS))
    elif dl_type == 'Expenditures':
        EXP_GET_PARAMS['ExpendedDate'] = kwargs['start_date']
        EXP_GET_PARAMS['ExpendedDateThru'] = kwargs['end_date']
        url = '%s/DownloadList.aspx?%s' % (BASE_URL, urlencode(EXP_GET_PARAMS))
    elif dl_type == 'Candidates':
        url = 'http://www.elections.state.il.us/ElectionInformation/CandDataFile.aspx?id=%s' % kwargs['election_id']
    else:
        raise ValueError('Unknown dl_type: %r' % (dl_type,))
    g = s.get(url)
    if 'Unexpected errors occurred trying to populate page' in g.content:
        return None
    # Harvest the ASP.NET hidden fields required for the download postback.
    soup = BeautifulSoup(g.content)
    view_state = soup.find('input', attrs={'id': '__VIEWSTATE'}).get('value')
    event_val = soup.find('input', attrs={'id': '__EVENTVALIDATION'}).get('value')
    post_data['__VIEWSTATE'] = view_state
    post_data['__EVENTVALIDATION'] = event_val
    dl_page = s.post(url, data=post_data)
    if dl_page.status_code == 200:
        return dl_page.content
    else:
        return None
def delete(self, url, **kwargs):
    """DELETE the resource at *url* (resolved to a full URI) with the default timeout applied."""
    kwargs = self._set_default_timeout(**kwargs)
    return Session.delete(self, self._get_resource_uri(url), **kwargs)
def __init__(self, device_key):
    """Create an API client authenticated with *device_key*.

    All requests go through one session carrying the key auth and JSON headers.
    """
    self.base_url = '{0}/{1}/'.format(self.API_URL, self.API_VERSION)
    self.session = Session()
    self.session.auth = KeyAuth(device_key)
    self.session.headers['Content-Type'] = 'application/json'
def configure_http_session(size=20, max_retries=1, _session=None):
    """
    Return a :class:`requests.Session` configured with a
    :class:`requests.adapters.HTTPAdapter` (connection pool) for http and
    https connections.

    :param size: The connection pool and maximum size.
    :type size: int
    :param max_retries: The maximum number of retries for each connection.
    :type max_retries: int
    :param _session: Test-only hook to provide a pre-configured session.
    """
    # Test hook: hand back the injected session untouched.
    if _session is not None:
        return _session
    session = Session()
    pooled_adapter = HTTPAdapter(
        pool_connections=size,
        pool_maxsize=size,
        max_retries=max_retries,
    )
    for prefix in ("http://", "https://"):
        session.mount(prefix, pooled_adapter)
    session.max_redirects = 1
    # Pin certificate verification to the certifi bundle.
    session.verify = certifi.where()
    return session
def send(self, request, **kwargs):
    """Resolve the request URL, delegate to the parent session, and annotate
    the response with its start time and HTTP method before measuring it."""
    request.url = resolve(request.url)
    # started
    started_at = datetime.datetime.utcnow()
    response = _Session.send(self, request, **kwargs)
    response.started = started_at
    response.method = request.method
    _measure(response)
    return response
def patch(self, url, data=None, **kwargs):
    """PATCH *data* to the resource at *url* (resolved to a full URI) with the default timeout applied."""
    kwargs = self._set_default_timeout(**kwargs)
    return Session.patch(self, self._get_resource_uri(url), data, **kwargs)
def __init__(self, statsCollector):
    """Set up the uploader thread: work queue, shared session, upload pool.

    :param statsCollector: sink for tally/datapoint metrics
    """
    Thread.__init__(self)
    self._queue = Queue()
    # FIX: Thread.setDaemon() is deprecated (Python 3.10+); assign the
    # `daemon` attribute instead — identical effect.
    self.daemon = True
    self._session = Session()
    self._session.headers.update({
        "User-Agent": "CRESTMarketTrawler/{0} ([email protected])".format(VERSION)
    })
    # Bound concurrent uploads to 10.
    self._pool = Pool(size=10)
    self.statsCollector = statsCollector
class APIClient(BaseAPIClient):
    """API client that fetches its spec from the server at construction time."""

    verify = True
    base_url = None

    def __init__(self, *args, **kwargs):
        self.session = Session()
        # The spec endpoint is queried up-front and handed to the base class.
        spec = self.call(SpecEndpoint())
        super(APIClient, self).__init__(*args, spec=spec, **kwargs)

    def make_request(self, endpoint, request):
        """Prefix the request URL with base_url, prepare it on the session, and send it."""
        request.url = self.base_url + request.url
        prepared = self.session.prepare_request(request)
        send_options = dict(
            stream=False,
            timeout=None,
            verify=self.verify,
            cert=None,
            proxies={},
            allow_redirects=True,
        )
        return self.session.send(prepared, **send_options)
def __init__(self, statsCollector):
    """Set up the uploader thread: bounded work queue, shared session, upload pool.

    :param statsCollector: sink for tally/datapoint metrics
    """
    Thread.__init__(self)
    # Bounded queue so a stalled uploader applies back-pressure.
    self._queue = Queue(EMDR_QUEUE_SIZE)
    # FIX: Thread.setDaemon() is deprecated (Python 3.10+); assign the
    # `daemon` attribute instead — identical effect.
    self.daemon = True
    self._session = Session()
    self._session.headers.update({
        "User-Agent": USER_AGENT_STRING
    })
    # Bound concurrent uploads to 10.
    self._pool = Pool(size=10)
    self.statsCollector = statsCollector
def send(self, request, **kwargs):
    """Do the actual request from within the session, doing some measures
    at the same time about the request (duration, status, etc).
    """
    # attach some information to the request object for later use.
    started_at = datetime.datetime.utcnow()
    response = _Session.send(self, request, **kwargs)
    response.started = started_at
    response.method = request.method
    self._analyse_request(response)
    return response
def __init__(self, service, region_name, host, auth, proxies=None):
    """Endpoint bound to a service/region/host with its own HTTP session.

    :param proxies: optional proxy mapping; defaults to no proxies
    """
    self.service = service
    # Reuse the service's (botocore-style) session for metadata/config.
    self.session = self.service.session
    self.region_name = region_name
    self.host = host
    self.verify = True
    self.auth = auth
    self.proxies = {} if proxies is None else proxies
    # Separate requests session for the actual HTTP traffic.
    self.http_session = Session()
class AdvancedSession:
    """Session wrapper with centralized error handling for ImpfterminService calls."""

    session = None
    error_counter = 0
    logger: 'logger' = field(init=False)

    def __post_init__(self):
        self.session = Session()
        self.session.headers.update(HEADERS)
        self.logger = settings.LocationAdapter(logger, {'location': 'API'})

    @api_call
    def get(self, *args: Any, **kwargs: Any) -> Any:
        return self.session.get(*args, **kwargs)

    @api_call
    def post(self, *args: Any, **kwargs: Any) -> Any:
        return self.session.post(*args, **kwargs)

    def _handle_error(self, code: int, message: dict) -> None:
        """Inspect an error response; raise after 3 strikes, back off on 429,
        and translate known 4xx payloads into the appropriate exceptions."""
        self.error_counter += 1
        if self.error_counter >= 3:
            raise AdvancedSessionError(-1, 'Maximum retries exceeded')
        elif code == 429:
            self.logger.warning('[429] The server is experiencing too many requests – either from our IP or generally. '
                                f'Waiting {settings.WAIT_API_CALLS // 60}min before trying again')
            self.logger.warning('It is highly recommended to avoid any further activity and stop '
                                'requesting ImpfterminService during that time')
            sleep(settings.WAIT_API_CALLS)
        elif code >= 400:
            if message:
                if message.get('errors'):
                    raise AdvancedSessionError(-1, message.get('errors'))
                if message.get('error') and 'Anfragelimit erreicht' in message.get('error'):
                    raise AdvancedSessionError(-1, 'Maximum requests reached for phone number and email')
                elif message.get('error'):
                    self.logger.warning(f'Endpoint returned Error: {message.get("error")}')
                    self.logger.info('Cookies probably expired – raising AdvancedSessionCache')
                    raise AdvancedSessionCache(code, message)
            else:
                # FIX: removed leftover debugging breakpoint
                # (`import pdb; pdb.set_trace()`) that would hang production runs.
                raise AdvancedSessionError(code, message)
def request(session: Session, method, url, *args, **kwargs):
    """Send a request and transparently solve a Cloudflare anti-bot challenge
    if the response looks like one."""
    resp = session.request(method, url, *args, **kwargs)
    # Cloudflare's IUAM page: 503 from cloudflare-nginx containing the
    # jschl challenge tokens in its body.
    is_cf_challenge = (
        resp.status_code == 503
        and resp.headers.get("Server") == "cloudflare-nginx"
        and b"jschl_vc" in resp.content
        and b"jschl_answer" in resp.content
    )
    if is_cf_challenge:
        return solve_cf_challenge(session, resp, **kwargs)
    # Otherwise, no Cloudflare anti-bot detected
    return resp
def create_session():
    """Build a Session with retrying adapters and the configured User-Agent."""
    session = Session()
    default_ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
    # App config may override the UA; otherwise fall back to a desktop Chrome string.
    session.headers["User-Agent"] = current_app.config.get("USER_AGENT", default_ua)
    retrying_adapter = HTTPAdapter(max_retries=3)
    for prefix in ("http://", "https://"):
        session.mount(prefix, retrying_adapter)
    return session
def __init__(self, account, password):
    """Hold login credentials and per-login state for the sign-in flow.

    `su` is the base64-encoded account name the login endpoint expects.
    """
    self.account = account
    self.password = password
    account_bytes = self.account.encode("utf-8")
    self.su = base64.b64encode(account_bytes).decode("utf-8")
    self.session = Session()
    # State filled in by later steps of the login handshake.
    self.server_param = {}
    self.login_rep_str = None
    self.ret_code = -1
    self.cookies = None
    # Captcha ("yzm") image name and recognized code, if one is required.
    self.yzm_pic_name = None
    self.yzm_code = None
def __init__(self, session: Optional[Session], api_url: str, text_type: TextType, text_format: TextFormat):
    """Text-API client.

    :param session: optional pre-configured session; a fresh one is created if absent
    :param api_url: base URL of the API
    :param text_type: kind of text handled
    :param text_format: required output format
    :raises TextFormatRequired: when *text_format* is not provided
    """
    self.session = session if session else Session()
    self.api_url = api_url
    self.text_type = text_type
    # Guard clause: a format is mandatory.
    if not text_format:
        raise TextFormatRequired()
    self.text_format = text_format
def News():
    """Accept an uploaded spreadsheet, download each row's article, zip them,
    and return the zip to the client.

    Column 'd' holds the host/path to fetch; column 'k' the filename to store.
    """
    from io import BytesIO

    files = dict(request.files)
    file_ = next(iter(files.values()))
    file_.seek(0)
    tmpFile = tempfile.NamedTemporaryFile(mode='wb', suffix='.%s' % file_.filename.split('.')[-1])
    tmpFile.write(file_.read())
    # FIX: flush so pandas (which re-opens the file by name) sees the full upload.
    tmpFile.flush()
    df = pd.read_excel(tmpFile.name, engine='openpyxl', header=None,
                       names=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k'])
    tmpFile.close()
    s = Session()
    with tempfile.TemporaryDirectory() as tmpdirname:
        zip_path = os.path.join(tmpdirname, 'news.zip')
        zipObj = ZipFile(zip_path, 'w')
        for row in df.itertuples():
            r = s.get('http://%s' % row.d, allow_redirects=True)
            if r.status_code != 200:
                continue
            with open(os.path.join(tmpdirname, row.k), 'wb') as f:
                f.write(r.content)
            zipObj.write(os.path.join(tmpdirname, row.k), row.k)
            # time.sleep(1)
        zipObj.close()
        # FIX: the TemporaryDirectory is deleted as soon as this function
        # returns, but Flask streams the response afterwards — read the zip
        # into memory first and serve that.
        with open(zip_path, 'rb') as fh:
            payload = BytesIO(fh.read())
    return send_file(payload, mimetype='application/zip')
def __init__(self, authentication_key, change_source):
    """Portal connection using query-string auth and a retrying HTTP adapter."""
    super(PortalConnection, self).__init__()
    self._authentication_handler = \
        _QueryStringAuthenticationHandler(authentication_key)
    self._change_source = change_source
    self._session = Session()
    self._session.headers['User-Agent'] = _USER_AGENT
    # Mount on '' so the retry adapter applies to every URL prefix.
    self._session.mount('', HTTPAdapter(max_retries=_HTTP_CONNECTION_MAX_RETRIES))
def test_page_conection(self):
    """A +HD thread is private without a session cookie and readable with one."""
    ws = WebScraper(Session())
    # url +HD
    ws.parse_page(
        "https://www.forocoches.com/foro/showthread.php?t=7373097")
    self.assertTrue(ws.is_private_web())
    # Attach a session cookie to the scraper to gain access to +HD.
    session_cookie = rS.get_cookie()
    ws.session = session_cookie
    ws.parse_page()
    self.assertFalse(ws.is_private_web())
def __init__(
    self,
    *,
    api_url: str,
    text_type: TextType,
    text_format: TextFormat,
    session: Optional[Session] = None,
):
    """Text-API client (keyword-only constructor).

    :param api_url: base URL of the API
    :param text_type: kind of text handled
    :param text_format: output format
    :param session: optional pre-configured session; a fresh one is used if absent
    """
    self.session = session if session else Session()
    self.api_url = api_url
    self.text_type = text_type
    self.text_format = text_format
def get_session(cls, url):
    """Return a shared Session keyed by the URL's scheme://netloc so that
    connections to the same origin are reused as much as possible."""
    parts = urlparse(url)
    origin_key = "://".join(parts[0:2])
    if origin_key not in cls._global_sessions:
        cls._global_sessions[origin_key] = Session()
    return cls._global_sessions[origin_key]
def _get_session():
    """Lazily create the module-level Session with default JSON headers."""
    global _session
    if _session is None:
        _session = Session()
        # Defaults applied once, on first creation only.
        _session.headers.update({
            "Content-type": "application/json",
            "Accept": "application/json",
            "Accept-Charset": "utf-8",
            "Cache-Control": "no-cache",
        })
    return _session
class HTTPRequest2(object):
    """Keeps cookie state on a shared Session so it carries over to the next request."""

    def __init__(self):
        # One Session shared by every request made through this wrapper.
        self.session = Session()

    def request(self, method, url, params=None, data=None, headers=None, cookies=None, json=None):
        """Dispatch a GET or POST through the shared session, logging each call.

        POST bodies go as JSON when *json* is given, otherwise as form *data*.
        Returns None for any other method.
        """
        method = method.lower()
        if method == "post":
            # JSON body takes precedence (for APIs that accept JSON payloads).
            if json:
                my_log.info("正在发送请求,请求地址:{}, 请求参数:{}".format(url, json))
                return self.session.post(url=url, json=json, headers=headers, cookies=cookies)
            my_log.info("正在发送请求,请求地址:{}, 请求参数:{}".format(url, data))
            return self.session.post(url=url, data=data, headers=headers, cookies=cookies)
        if method == "get":
            my_log.info("正在发送请求,请求地址:{}, 请求参数:{}".format(url, params))
            return self.session.get(url=url, params=params, headers=headers, cookies=cookies)

    def close(self):
        """Release the underlying session and its pooled connections."""
        self.session.close()
def update(title, tags, desc):
    """Log in to AnkiWeb and upload the packaged add-on.

    :param title: listing title
    :param tags: listing tags
    :param desc: listing description
    :return: True when AnkiWeb redirects to the add-on's info page (success)
    """
    s = Session()
    URL = 'https://ankiweb.net/account/login'
    rsp = s.get(URL)
    soup = BeautifulSoup(rsp.text, features="html.parser")
    # The login form requires its CSRF token to be echoed back.
    csrf_token = soup.find('input', {'name': 'csrf_token'}).get('value')
    s.post(URL, data={
        'submit': 1,
        'csrf_token': csrf_token,
        'username': username,
        'password': password
    })
    URL = 'https://ankiweb.net/shared/upload'
    # FIX: open the zip in a context manager — the handle was previously leaked.
    with open(f'{MODEL_NAME}.zip', 'rb') as zip_fh:
        file = {'v21file': zip_fh}
        rsp = s.post(URL, files=file, data={
            'title': title,
            'tags': tags,
            'desc': desc,
            'id': addon_id,
            'submit': 'Update',
            'v21file': file,
            'v20file': '',
        })
    # Success is signalled by a redirect to the add-on's public info page.
    if rsp.url == f'https://ankiweb.net/shared/info/{addon_id}':
        return True
    else:
        return False
class DemoApi(object):
    """Demo API wrapper that logs in and fetches details on a shared session."""

    def __init__(self, base_url):
        self.base_url = base_url
        # One session so the login cookie is reused by later calls.
        self.session = Session()

    def login(self, username, password):
        """
        Login endpoint.

        :param username: account name
        :param password: account password
        :return: decoded JSON response
        """
        url = urljoin(self.base_url, 'login')
        data = {'username': username, 'password': password}
        resp = self.session.post(url, data=data)
        response = resp.json()
        print('\n*****************************************')
        print(u'\n1、请求url: \n%s' % url)
        print(u'\n2、请求头信息:')
        pprint(self.session.headers)
        print(u'\n3、请求参数:')
        pprint(data)
        print(u'\n4、响应:')
        pprint(response)
        return response

    def info(self):
        """
        Detail endpoint.

        :return: decoded JSON response
        """
        url = urljoin(self.base_url, 'info')
        resp = self.session.get(url)
        response = resp.json()
        print('\n*****************************************')
        print(u'\n1、请求url: \n%s' % url)
        print(u'\n2、请求头信息:')
        pprint(self.session.headers)
        print(u'\n3、请求cookies:')
        pprint(dict(self.session.cookies))
        print(u'\n4、响应:')
        pprint(response)
        return response
def __init__(self, base_url, port=None, username=None, password=None, **kwargs):
    """Initialize both parent classes, disable TLS verification, and set auth.

    NOTE(review): verify=False plus urllib3.disable_warnings() silences
    certificate checking globally — confirm this is intentional.
    """
    Session.__init__(self)
    Client.__init__(self, base_url, port, username, password, **kwargs)
    self.verify = False
    self._set_auth(username, password)
    urllib3.disable_warnings()
class Api(object):
    """Thin client for the Pinaple API: key auth first, session auth after login."""

    API_URL = 'http://192.168.33.10'  # todo: add to config
    API_VERSION = 'v1'

    def __init__(self, device_key):
        self.base_url = '{0}/{1}/'.format(self.API_URL, self.API_VERSION)
        self.session = Session()
        self.session.auth = KeyAuth(device_key)
        self.session.headers.update({
            'Content-Type': 'application/json'
        })

    def request(self, method, url, **kwargs):
        """Constructs and sends a Request to the Pinaple API."""
        full_url = urljoin(self.base_url, url)
        if 'data' in kwargs:
            kwargs['data'] = self._encode_data(kwargs['data'])
        # FIX: `super(Api, self).request(...)` resolved to `object`, which has
        # no `request` method and raised AttributeError; delegate to the
        # configured session instead.
        return self.session.request(method, full_url, **kwargs)

    def _encode_data(self, data, **kwargs):
        """Returns data encoded as JSON using a custom encoder."""
        # NOTE(review): `self._json_encoder` is never assigned in this class —
        # confirm a subclass/mixin provides it, otherwise the no-kwargs path
        # raises AttributeError.
        encoder = JSONEncoder(**kwargs) if kwargs else self._json_encoder
        return encoder.encode(data)

    def test(self):
        """Hit the connectivity-test endpoint and return the raw response."""
        url = urljoin(self.base_url, 'functions/test')
        response = self.session.get(url)
        return response

    def login(self):
        """Exchange the device key for a session token; exits on failure."""
        url = urljoin(self.base_url, 'login')
        response = self.session.post(url)
        # FIX: was `response.status_code is not 200` — identity comparison with
        # an int literal is unreliable (CPython caching artifact); use !=.
        if response.status_code != 200:
            print('[error] device is not authorized')
            exit()
        data = response.json()
        self.session.auth = SessionAuth(data['session_token'])
def get_headerframe_bean_id(self, s: Session, syorui_kanri_no):
    """Fetch the document-view header frame and extract "be.bean.id" from its HTML."""
    url = EdinetUrl.header_frame_url.value.format(syorui_kanri_no)
    # NOTE: verify=False — TLS verification is deliberately skipped here.
    header_html = s.get(url, timeout=10, headers=self.user_agent, verify=False)
    # The bean id lives on a single line of the form: ..."be.bean.id", "<id>"...
    candidate_lines = [
        line for line in header_html.text.split('\r\n')
        if '"be.bean.id"' in line
    ]
    return re.search('"be.bean.id", "(.+)"', candidate_lines[0]).group(1)
def setUp(self):
    """Create the HTTP session and the endpoint URLs/credentials used by the tests."""
    # FIX: removed a block of commented-out scaffolding (manual server start
    # via subprocess) that was dead code.
    self.session = Session()
    self.login_url = 'http://127.0.0.1:5000/login'
    self.info_url = 'http://127.0.0.1:5000/info'
    self.username = '******'
    self.password = '******'
def get_csv(sess: sessions.Session, endpoint: str, **parameters):
    """
    get csv data from the Intrinio API, concatenating all result pages

    :param sess: session
    :param endpoint: endpoint (".csv" is appended if missing)
    :param parameters: query parameters (page_size defaults to 10000)
    :return: csv result as one string (header included once), or None if empty
    """
    # Credentials come from the environment; both may be None — TODO confirm
    # anonymous access is acceptable in that case.
    auth = os.getenv('INTRINIO_USERNAME'), os.getenv('INTRINIO_PASSWORD')
    url = '{}/{}'.format(
        'https://api.intrinio.com',
        endpoint + ('' if endpoint.endswith('.csv') else '.csv'))
    if 'page_size' not in parameters:
        parameters['page_size'] = 10000
    pages = list()
    # Page numbers are 1-based; iterate until an empty page or the reported total.
    for page_number in itertools.count():
        parameters['page_number'] = page_number + 1
        response = sess.request('GET', url, params=parameters, auth=auth, verify=True)
        if not response.ok:
            try:
                response.raise_for_status()
            except Exception as err:
                # Error is logged but NOT re-raised — the loop continues and
                # parses the error body below.
                logging.getLogger(__name__).error(err)
        # A body with a single newline means "no more data".
        new_lines = response.content.decode('utf-8').count('\n')
        if new_lines == 1:
            break
        # Body layout: metadata line, column-header line, then the data rows.
        info, columns, page = response.content.decode('utf-8').split('\n', 2)
        if page_number == 0:
            # Metadata is "KEY:VALUE,KEY:VALUE,..."; TOTAL_PAGES bounds the loop.
            info = {s.split(':')[0]: s.split(':')[1] for s in info.split(',')}
            total_pages = int(info['TOTAL_PAGES'])
            # Emit the header exactly once, lower-cased.
            pages.append(columns.lower() + '\n')
        pages.append(page)
        if len(page) == 0 or page_number + 1 == total_pages:
            break
    return ''.join(pages) if len(pages) > 0 else None
def load_loginpage(session: Session, form_meta: Dict[str, str]) -> Dict[str, str]:
    """
    Load the login page and harvest the form tokens.

    :param session: session carrying cookies for the whole login flow
    :param form_meta: previously gathered form fields; merged into the result
    :return: form_meta plus
        * request_verification_token - hidden anti-forgery token from the form
        * captcha_image_url - captcha image URL, relative to the site root
    :raise RuntimeError: when the login page fails to load
    """
    url = "https://row1.vfsglobal.com/GlobalAppointment/"
    payload = {}
    # Browser-like headers so the request is not rejected as a bot.
    headers = {
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
        'Sec-Fetch-User': '******',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-Mode': 'navigate',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
    }

    def parse_page(page_html):
        # Pull the CSRF token and captcha image location out of the form.
        soup = BeautifulSoup(page_html, 'html.parser')
        form = soup.find('form', {'id': 'ApplicantListForm'})
        return {
            'request_verification_token': form.find('input', {'name': '__RequestVerificationToken'})['value'],
            'captcha_image_url': form.find('img', {'id': 'CaptchaImage'})['src'],
        }

    logger.info("尝试加载登录页面")
    response = session.request("GET", url, headers=headers, data=payload)
    if response.status_code == 200:
        return {
            **form_meta,
            **parse_page(response.text),
        }
    raise RuntimeError('加载登录页面失败', response)
def httpraw(raw: str, ssl: bool = False, **kwargs):
    """
    Send the original HTTP packet request, if you set the parameters such as
    headers in the parameters, the parameters you set will be sent
    :param raw: Original packet text
    :param ssl: whether is HTTPS
    :param kwargs: Support setting of parameters in requests
    :return:requests.Response
    """
    raw = raw.strip()
    # Clear up unnecessary spaces
    raws = list(map(lambda x: x.strip(), raw.splitlines()))
    # Request line must be "<METHOD> <PATH> <PROTOCOL>".
    try:
        method, path, protocol = raws[0].split(" ")
    except Exception:
        raise Exception("Protocol format error")
    post = None
    _json = None
    if method.upper() == "POST":
        # Find the blank line that separates headers from the body.
        index = 0
        for i in raws:
            index += 1
            if i.strip() == "":
                break
        # No body after the blank separator → malformed POST packet.
        if len(raws) == index:
            raise Exception
        # Headers are the lines between the request line and the blank line.
        tmp_headers = raws[1:index - 1]
        tmp_headers = extract_dict('\n'.join(tmp_headers), '\n', ": ")
        postData = raws[index]
        # Body that parses as JSON is sent via `json=`, otherwise via `data=`.
        try:
            json.loads(postData)
            _json = postData
        except ValueError:
            post = postData
    else:
        tmp_headers = extract_dict('\n'.join(raws[1:]), '\n', ": ")
    netloc = "http" if not ssl else "https"
    # NOTE(review): the lookup is case-sensitive — a lowercase "host:" header
    # would raise here; confirm inputs always use "Host".
    host = tmp_headers.get("Host", None)
    if host is None:
        raise Exception("Host is None")
    del tmp_headers["Host"]
    url = "{0}://{1}".format(netloc, host + path)
    # Caller-supplied kwargs win over values parsed from the raw packet.
    kwargs.setdefault('allow_redirects', True)
    kwargs.setdefault('data', post)
    kwargs.setdefault('headers', tmp_headers)
    kwargs.setdefault('json', _json)
    with Session() as session:
        return session.request(method=method, url=url, **kwargs)
def _run_request(self, request):
    """
    Executes HTTP GET request with timeout using the endpoint defined
    upon client creation. Raises on non-2xx responses.
    """
    session = Session()
    retry_adapter = HTTPAdapter(max_retries=self._TOTAL_RETRIES)
    for prefix in ("http://", "https://"):
        session.mount(prefix, retry_adapter)
    result = session.get(
        self.endpoint + "?" + request,
        headers=self._get_custom_headers(),
        timeout=self.timeout,
    )
    result.raise_for_status()
    return result
def test_importData(self, case):
    """Data-driven import test: log in, upload the spreadsheet, compare the
    status code with the expectation, and write the verdict back to Excel."""
    se = Session()
    # Log in first so the import request carries an authenticated session.
    login_url = "http://doctor.yy365.cn/index/login"
    login_data = {
        "username": conf.get("test_data", "admin_user"),
        "password": conf.get("test_data", "admin_pwd")
    }
    response = se.post(url=login_url, data=login_data)
    url1 = conf.get("env", "url") + case["url"]
    # FIX: do NOT set "Content-Type: multipart/form-data" manually — it lacks
    # the boundary parameter, so the server cannot parse the upload. requests
    # generates the correct header itself when `files=` is used.
    # FIX: open the file in a context manager so the handle is not leaked.
    with open(
            r"C:\Users\Administrator\Desktop\Romens_Api_Test\data\891407.xls",
            'rb') as upload_fh:
        file = {'file': upload_fh}
        response2 = se.post(url=url1, files=file, verify=False)
    res = response2.json()
    row = case["case_id"] + 1
    # NOTE(review): eval() on spreadsheet content is unsafe if the test data
    # is not fully trusted — consider ast.literal_eval.
    expected = eval(case["expected"])
    try:
        self.assertEqual(expected["status_code"], response2.status_code)
    except AssertionError as e:
        # Write the failure back into the Excel report.
        log.error("用例--{}--执行未通过".format(case["title"]))
        log.debug("预期结果:{}".format(expected))
        log.debug("实际结果:{}".format(res))
        log.exception(e)
        self.excel.write_data(row=row, column=8, value="未通过")
        raise e
    else:
        # Write the pass verdict back into the Excel report.
        log.info("用例--{}--执行通过".format(case["title"]))
        self.excel.write_data(row=row, column=8, value="通过")
def get_inout_samples(contest: str, problem: str, session: Session) \
        -> Dict[str, List[str]]:
    """Scrape an AtCoder task page and return its sample inputs and outputs.

    Sections headed 入力例/出力例 ("sample input"/"sample output") are collected
    in page order, with CRLF normalized to LF.
    """
    task_url = f'{ATCODER_URL}/contests/{contest}/tasks/{contest}_{problem}'
    page = session.get(task_url)
    soup = BeautifulSoup(page.text, "html.parser")
    inputs: List[str] = []
    outputs: List[str] = []
    for part in soup.find_all('div', class_='part'):
        heading = part.section.h3.string
        if "入力例" in heading:
            inputs.append(part.section.pre.string.replace('\r\n', '\n'))
        if "出力例" in heading:
            outputs.append(part.section.pre.string.replace('\r\n', '\n'))
    return {'input': inputs, 'output': outputs}
def create_session(data, variables_dict):
    """
    If the caller's namespace carries a usable Session, reuse it; otherwise
    create one and store it in data['session'].

    Priority: a Session on variables_dict['self'] (created and attached if
    missing/invalid) > a Session passed as variables_dict['session'] > a
    fresh Session.
    """
    if 'self' in variables_dict:
        owner = variables_dict.get('self', None)
        # FIX: the original duplicated this logic across a broad
        # try/except used for control flow; getattr with a default
        # covers the missing-attribute case directly.
        session = getattr(owner, 'session', None)
        if not isinstance(session, Session):
            session = Session()
            setattr(owner, 'session', session)
        data['session'] = session
    elif isinstance(variables_dict.get('session'), Session):
        data['session'] = variables_dict.get('session')
    else:
        data['session'] = Session()
def __queryNear(self, sess: Session) -> bool:
    """Fetch the most recent submission record; cache it and report whether one exists."""
    sess.headers.update({"referer": reportUrl})
    # NOTE: verify=False — certificate checks are deliberately skipped here.
    res = sess.post(
        url=tempHeader + "/com.sudytech.work.shgcd.jkxxcj.jkxxcj.queryNear.biz.ext?vpn-12-o2-workflow.sues.edu.cn",
        verify=False)
    near_list = res.json()["resultData"]
    if not near_list:
        return False
    # Keep the newest record and normalize its keys for later reuse.
    self.__lastData = near_list[0]
    lower_json(self.__lastData)
    return True
def __init__(self, access_key, secret_key, account_id,
             domain='https://mws.amazonservices.com', uri="", version=""):
    """MWS client credentials and session setup.

    Falls back to the class-level URI/VERSION when none are given; optionally
    reroutes traffic through Runscope when RUNSCOPE_BUCKET_KEY is configured.
    """
    self.access_key = access_key
    self.secret_key = secret_key
    self.account_id = account_id
    self.domain = domain
    self.uri = uri or self.URI
    self.version = version or self.VERSION
    self.session = Session()
    # Optional Runscope traffic inspection, enabled via settings.
    bucket_key = getattr(settings, 'RUNSCOPE_BUCKET_KEY', None)
    if bucket_key:
        logger.info("Redirecting API calls for MWS to runscope")
        self.configure_runscope(bucket_key)
def download_image(url, **kwargs):
    """下载验证码图片 (download a captcha image).

    :param url: 验证码网址 (captcha URL)
    :param kwargs: forwarded to requests
    :return: image bytes on HTTP 200, otherwise None
    """
    session = Session()
    # FIX: the original only incremented the retry counter inside `except`,
    # so a server that kept answering with a non-200 status caused an
    # infinite loop. Bound the attempts unconditionally.
    for _attempt in range(10):
        try:
            response = session.request('GET', url=url, **kwargs)
        except Exception:  # FIX: was a bare `except:`
            print('Download failed, try it again.')
            continue
        if response.status_code == 200:
            return response.content
    return None
def solve_captcha(session: Session, form_meta: Dict[str, str], howto: Callable[[PIL.Image.Image], str]) -> Dict[str, str]:
    """
    Solve the captcha.

    :param session: Session
    :param form_meta: result from the login page step
    :param howto: how to read the text out of the captcha image
    :return: form_meta (minus captcha_image_url) plus
        * captcha_detext - captcha ID
        * captcha_inputtext - captcha plain text
    :raise RuntimeError: when fetching the captcha fails
    """
    # The captcha ID is the query-string value of the image URL ("...?t=<id>").
    captcha_detext = form_meta['captcha_image_url'].split('=')[1]
    url = "https://row1.vfsglobal.com/GlobalAppointment/DefaultCaptcha/Generate"
    params = {'t': captcha_detext}
    payload = {}
    # Browser-like headers so the request is not rejected as a bot.
    headers = {
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
        'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'no-cors',
        'Referer': 'https://row1.vfsglobal.com/GlobalAppointment/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }
    logger.info("尝试下载验证码图片")
    response = session.request("GET", url, params=params, headers=headers, data=payload)
    if response.status_code == 200:
        # Imported lazily so PIL is only required when a captcha is actually fetched.
        from PIL import Image
        from io import BytesIO
        image = Image.open(BytesIO(response.content))
        captcha_inputtext = howto(image)
        logger.info("验证码: %s => %s", captcha_detext, captcha_inputtext)
        # The image URL is consumed here and not needed downstream.
        del form_meta['captcha_image_url']
        return {
            **form_meta,
            'captcha_detext': captcha_detext,
            'captcha_inputtext': captcha_inputtext
        }
    raise RuntimeError('获取验证码失败', response)
def __init__(self, auth, timeout=None, api_url=None):
    """Connection with retrying HTTP adapter and a configurable API URL.

    :param auth: authentication handler applied to outgoing requests
    :param timeout: optional per-request timeout
    :param api_url: overrides the class default _API_URL when given
    """
    super(Connection, self).__init__()
    self._api_url = api_url or self._API_URL
    self._authentication_handler = auth
    self._timeout = timeout
    self._session = Session()
    self._session.headers['User-Agent'] = _USER_AGENT
    # Mount on '' so the retry adapter applies to every URL prefix.
    self._session.mount('', HTTPAdapter(max_retries=_HTTP_CONNECTION_MAX_RETRIES))
def __init__(self, version_build, host, port, storage_engine):
    """Hold connection/config state for one server under test."""
    self.version_build = version_build
    self.host = host
    self.port = port
    self.storage_engine = storage_engine
    # Used for commandline programs such as net-mono and macosx
    self.process = None
    # For the subclasses, this property may be a file handle or a string
    self.logfile = None
    self.session = Session()
    self.session.headers['Content-Type'] = 'application/json'
def __init__(self, host=XHS_BASE_HOST, prefix="wx_mp_api/sns/v1/", scheme="https"):
    """Client for the XHS mini-program API with a randomized User-Agent."""
    self._host = host
    self._prefix = prefix
    self._base_url = "{}://{}/{}".format(scheme, host, prefix)
    self._session = Session()
    # Pick a User-Agent at random to vary the client fingerprint.
    ua_index = random.randint(0, len(USER_AGENTS) - 1)
    headers = {
        "User-Agent": USER_AGENTS[ua_index],
        "Host": self._host,
        "Connection": "close",
        "Accept-Encoding": "br, gzip, deflate",
        "Content-Type": "application/json",
        "Accept-Language": "zh-cn",
        "Device-Fingerprint": "WHJMrwNw1k/Gy/sC6Z1D0XzFNbmyE3cyfJCjTR5D+eJ4GPjHvuEU1skE1O3fkhMVWGWPZ3E6FqIOaBmFkRMqt6xFlRX"
                              "tfTfVBdCW1tldyDzmauSxIJm5Txg==1487582755342",
        "Authorization": "ea521d9b-c1fb-4f91-8560-bfa160171e5a"
    }
    sid = "1572606846528931138054"
    self._sid = "session.{}".format(sid)
    self._session.headers.update(headers)
    # Disable connection reuse to match the "Connection: close" header.
    self._session.keep_alive = False
def post_delete_endpoint_with_session_and_url(
    session: Session,
    url: str,
    payload_obj: dict,
) -> typing.Tuple[str, int]:
    """POST a delete payload and return (urn, rows_affected) from the Rest.li summary."""
    body = json.dumps(payload_obj)
    response = session.post(url, body)
    summary = parse_run_restli_response(response)
    return summary.get("urn", ""), summary.get("rows", 0)
def httpraw(raw: str, ssl: bool = False, **kwargs):
    """
    Send a raw HTTP packet as a request; any parameters you set in kwargs
    (e.g. headers) take precedence over values parsed from the packet.

    :param raw: raw packet text
    :param ssl: whether this is HTTPS
    :param kwargs: supports setting any of the requests parameters
    :return: requests.Response
    """
    raw = raw.strip()
    # Clear up unnecessary spaces
    raws = list(map(lambda x: x.strip(), raw.splitlines()))
    # Request line must be "<METHOD> <PATH> <PROTOCOL>".
    try:
        method, path, protocol = raws[0].split(" ")
    except Exception:
        raise Exception("Protocol format error")
    post = None
    _json = None
    if method.upper() == "POST":
        # Find the blank line that separates headers from the body.
        index = 0
        for i in raws:
            index += 1
            if i.strip() == "":
                break
        # No body after the blank separator → malformed POST packet.
        if len(raws) == index:
            raise Exception
        # Headers are the lines between the request line and the blank line.
        tmp_headers = raws[1:index - 1]
        tmp_headers = extract_dict('\n'.join(tmp_headers), '\n', ": ")
        postData = raws[index]
        # Body that parses as JSON is sent via `json=`, otherwise via `data=`.
        try:
            json.loads(postData)
            _json = postData
        except ValueError:
            post = postData
    else:
        tmp_headers = extract_dict('\n'.join(raws[1:]), '\n', ": ")
    netloc = "http" if not ssl else "https"
    # NOTE(review): the lookup is case-sensitive — a lowercase "host:" header
    # would raise here; confirm inputs always use "Host".
    host = tmp_headers.get("Host", None)
    if host is None:
        raise Exception("Host is None")
    del tmp_headers["Host"]
    url = "{0}://{1}".format(netloc, host + path)
    # Caller-supplied kwargs win over values parsed from the raw packet.
    kwargs.setdefault('allow_redirects', True)
    kwargs.setdefault('data', post)
    kwargs.setdefault('headers', tmp_headers)
    kwargs.setdefault('json', _json)
    with Session() as session:
        return session.request(method=method, url=url, **kwargs)
def navigate_subjects_page(session: Session, relogin: bool = True) -> Tuple[bool, Response]:
    """Navigate to the class-enrollment (turmas) page.

    :param session: authenticated requests session
    :param relogin: when True, attempt one login + retry if the first GET
        was redirected away from the subjects page
    :return: ``(True, response)`` on success, ``(False, None)`` otherwise
    """
    # Get list of classes
    r = session.get(infor_insc_turmas_url)
    if r.status_code != 200:
        logging.info(r.status_code)
        return False, None
    # NOTE: was `r.url is not infor_insc_turmas_url` — identity comparison
    # on strings is always True here; compare values instead.
    if r.url != infor_insc_turmas_url:
        logging.info("Navigate to subjects page failed. Relogin and retry")
        if not relogin:
            return False, None
        sucess, res = login(session)
        if not sucess:
            return False, None
        # Retry once after logging back in (previously the stale redirect
        # page was parsed anyway).
        r = session.get(infor_insc_turmas_url)
        if r.status_code != 200 or r.url != infor_insc_turmas_url:
            logging.info(r.status_code)
            return False, None
    page = BeautifulSoup(r.text, 'html.parser')
    # TODO avisar caso LEI não se a primeira
    # quickfix
    next_link_part = page.find(id="link_0").a["href"]
    url = gen_link(infor_insc_turmas_url, next_link_part)
    r = session.post(url)
    if r.status_code != 200:
        logging.info(r.status_code)
        return False, None
    return True, r
class HttpClient:
    """High-reliability HTTP client wrapped around the ``requests`` library.

    :param max_connect_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server.
    :param max_request_tries: The maximum times of tries each request should
        attempt.
    """

    def __init__(self, max_connect_retries=0, max_request_tries=0):
        self.timeout = DEFAULT_TIMOUT
        self.max_connect_retries = (max_connect_retries or DEFAULT_CONNECT_RETRIES)
        self.max_request_tries = max_request_tries or DEFAULT_REQUEST_TRIES
        self.session = Session()
        # Fix: honour the configured connect-retry budget — previously the
        # constructor argument was stored but Retry was hard-coded to 2.
        retries = Retry(connect=self.max_connect_retries, read=2, status=2, redirect=2)
        self.session.mount('https://', HTTPAdapter(max_retries=retries))
        self.session.mount('http://', HTTPAdapter(max_retries=retries))

    def get(self, url, content_type='json', max_request_times=0, timeout=0):
        """GET *url*, retrying up to the configured number of attempts.

        :param url: target URL
        :param content_type: 'json' → return parsed JSON, else return text
        :param max_request_times: per-call override of the attempt budget
        :param timeout: per-call override of the request timeout
        :return: parsed JSON / text, or None when every attempt failed
        """
        max_times = max_request_times or self.max_request_tries
        has_request_times = 0
        data = None
        while has_request_times < max_times:
            try:
                res = self.session.get(url, timeout=timeout or self.timeout)
                data = res.json() if content_type == 'json' else res.text
                if not data:
                    # An empty payload counts as a failed attempt.
                    has_request_times = has_request_times + 1
                    continue
                else:
                    break
            except requests.exceptions.ConnectionError as e:
                # Connection-level failures were already retried by urllib3's
                # Retry; give up instead of burning the whole attempt budget.
                print("socket连接错误或读取超时", e.__class__)
                break
            except Exception:
                # Any other error (e.g. invalid JSON) consumes one attempt.
                has_request_times = has_request_times + 1
                continue
        if not data:
            print("尝试了{}次请求依然失败".format(has_request_times + 1))
        else:
            print("尝试了{}次请求成功".format(has_request_times + 1))
        self.session.close()
        return data

    def post(self, url):
        # Not implemented yet.
        pass
def send(self, request, **kwargs):
    """Send the prepared request through the parent session, recording
    timing and request metadata for later analysis.
    """
    # Plain-HTTP requests are routed through our DNS resolver: the URL is
    # rewritten to the resolved address while the Host header keeps the
    # original name.
    if not request.url.startswith('https://'):
        resolved_url, original_host, _resolved = dns_resolve(request.url)
        request.url = resolved_url
        request.headers['Host'] = original_host
    # Attach some information to the response object for later use.
    started_at = datetime.datetime.utcnow()
    response = _Session.send(self, request, **kwargs)
    response.started = started_at
    response.method = request.method
    self._analyse_request(response)
    return response
class EMDRUploader(Thread):
    """Daemon thread that gzips queued market orders and submits them to EMDR
    via a greenlet pool.
    """

    def __init__(self, statsCollector):
        Thread.__init__(self)
        self._queue = Queue(EMDR_QUEUE_SIZE)
        self.setDaemon(True)
        self._session = Session()
        self._session.headers.update({
            "User-Agent": USER_AGENT_STRING
        })
        self._pool = Pool(size=10)
        self.statsCollector = statsCollector

    def notify(self, regionID, orders):
        """Queue (timestamp, region, orders) for asynchronous upload and
        record queue-depth stats.
        """
        self._queue.put((timestampString(), regionID, orders))
        self.statsCollector.tally("emdr_send_queued")
        queueSize = self._queue.qsize()
        self.statsCollector.datapoint("emdr_queue_size", queueSize)
        if queueSize > EMDR_QUEUE_SIZE / 2:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning("EMDR submit queue is about {0} items long!".format(queueSize))

    def run(self):
        def submit(generationTime, regionID, orders):
            # Upload each chunk of orders as a separate gzipped POST.
            chunks = chunkOrders(orders)
            for idx, orderChunk in enumerate(chunks):
                with TemporaryFile() as gzfile:
                    # Fix: the GzipFile must be closed before the payload is
                    # read back — otherwise buffered data and the gzip
                    # trailer are missing, corrupting the upload, and
                    # gzfile.tell() under-reports Content-Length.
                    with gzip.GzipFile(fileobj=gzfile, mode="wb") as gz:
                        ujson.dump(
                            EMDROrdersAdapter(generationTime, regionID, orderChunk),
                            gz
                        )
                    headers = {'Content-Length': str(gzfile.tell()),
                               'Content-Encoding': 'gzip',  # what EMDR wants
                               # 'Transfer-Encoding': 'gzip'  # what is strictly true
                               }
                    gzfile.seek(0, 0)
                    logger.info(
                        "Submitting to EMDR for region {} (chunk {} of {})".format(regionID, idx + 1, len(chunks)))
                    res = self._session.post("http://upload.eve-emdr.com/upload/", data=gzfile, headers=headers)
                    self.statsCollector.tally("emdr_chunks_sent")
                    if res.status_code != 200:
                        logger.error("Error {0} submitting to EMDR: {1}".format(res.status_code, res.content))
                        self.statsCollector.tally("emdr_errored")

        # Drain the queue forever, fanning submissions out to the pool.
        while True:
            (generationTime, regionID, orders) = self._queue.get()
            self.statsCollector.datapoint("emdr_queue_size", self._queue.qsize())
            self._pool.spawn(submit, generationTime, regionID, orders)
class Endpoint(object):
    """An endpoint for a particular service in a specific region.

    Only an endpoint can make requests.

    :ivar service: The Service object that describes this endpoints service.
    :ivar host: The fully qualified endpoint hostname.
    :ivar session: The session object.
    """

    def __init__(self, service, region_name, host, auth, proxies=None):
        self.service = service
        self.session = self.service.session
        self.region_name = region_name
        self.host = host
        self.verify = True
        self.auth = auth
        # Default to no proxies rather than sharing a mutable default.
        self.proxies = {} if proxies is None else proxies
        self.http_session = Session()

    def __repr__(self):
        return "%s(%s)" % (self.service.endpoint_prefix, self.host)

    def make_request(self, params, list_marker=None):
        # Subclasses implement the actual request construction.
        raise NotImplementedError("make_request")

    def prepare_request(self, request):
        """Sign (when auth is configured) and prepare the request."""
        logger.debug("prepare_request")
        if self.auth is not None:
            self.auth.add_auth(request=request)
        return request.prepare()

    def _send_request(self, request, operation):
        """Send the prepared request, streaming when the operation asks for it."""
        streaming = operation.is_streaming()
        return self.http_session.send(
            request,
            verify=self.verify,
            stream=streaming,
            proxies=self.proxies,
        )
class Endpoint(object):
    """
    Represents an endpoint for a particular service in a specific region.
    Only an endpoint can make requests.

    :ivar service: The Service object that describes this endpoints service.
    :ivar host: The fully qualified endpoint hostname.
    :ivar session: The session object.
    """

    def __init__(self, service, region_name, host, auth, proxies=None):
        self.service = service
        self.session = self.service.session
        self.region_name = region_name
        self.host = host
        self.verify = True
        self.auth = auth
        if proxies is None:
            proxies = {}
        self.proxies = proxies
        self.http_session = Session()
        # Serialises auth signing; see prepare_request below.
        self._lock = threading.Lock()

    def __repr__(self):
        return '%s(%s)' % (self.service.endpoint_prefix, self.host)

    def make_request(self, operation, params):
        """Build, sign and send a request for *operation* with *params*,
        retrying as directed by the 'needs-retry' event handlers.
        """
        logger.debug("Making request for %s (verify_ssl=%s) with params: %s",
                     operation, self.verify, params)
        request = self._create_request_object(operation, params)
        prepared_request = self.prepare_request(request)
        return self._send_request(prepared_request, operation)

    def _create_request_object(self, operation, params):
        # Subclasses build the protocol-specific request object.
        raise NotImplementedError('_create_request_object')

    def prepare_request(self, request):
        """Emit the 'before-auth' event, sign the request (under a lock),
        and return the prepared request.
        """
        if self.auth is not None:
            with self._lock:
                # Parts of the auth signing code aren't thread safe (things
                # that manipulate .auth_path), so we're using a lock here to
                # prevent race conditions.
                event = self.session.create_event(
                    'before-auth', self.service.endpoint_prefix)
                self.session.emit(event, endpoint=self,
                                  request=request, auth=self.auth)
                self.auth.add_auth(request=request)
        prepared_request = request.prepare()
        return prepared_request

    def _send_request(self, request, operation):
        """Send *request*, retrying while _needs_retry says to."""
        attempts = 1
        response, exception = self._get_response(request, operation, attempts)
        while self._needs_retry(attempts, operation, response, exception):
            attempts += 1
            # If there is a stream associated with the request, we need
            # to reset it before attempting to send the request again.
            # This will ensure that we resend the entire contents of the
            # body.
            request.reset_stream()
            response, exception = self._get_response(request, operation,
                                                     attempts)
        return response

    def _get_response(self, request, operation, attempts):
        """Return ((http_response, parsed_data), None) on success or
        (None, exception) when the send raised.
        """
        try:
            logger.debug("Sending http request: %s", request)
            http_response = self.http_session.send(
                request, verify=self.verify,
                stream=operation.is_streaming(),
                proxies=self.proxies)
        except Exception as e:
            return (None, e)
        # This returns the http_response and the parsed_data.
        return (botocore.response.get_response(self.session, operation,
                                               http_response), None)

    def _needs_retry(self, attempts, operation, response=None,
                     caught_exception=None):
        """Ask the 'needs-retry' handlers whether to retry.

        A non-None handler response is interpreted as the number of seconds
        to sleep before retrying; None means no retry.
        """
        event = self.session.create_event(
            'needs-retry', self.service.endpoint_prefix, operation.name)
        handler_response = self.session.emit_first_non_none_response(
            event, response=response, endpoint=self,
            operation=operation, attempts=attempts,
            caught_exception=caught_exception)
        if handler_response is None:
            return False
        else:
            # Request needs to be retried, and we need to sleep
            # for the specified number of times.
            logger.debug("Response received to retry, sleeping for "
                         "%s seconds", handler_response)
            time.sleep(handler_response)
            return True
class PortalConnection(object):
    """
    Connection to HubSpot

    :param authentication_key: This can be either an :class:`APIKey` or an \
        :class:`OAuthKey` instance
    :param basestring change_source: The string passed to HubSpot as \
        ``auditId`` in the query string
    """

    _API_URL = 'https://api.hubapi.com'

    def __init__(self, authentication_key, change_source):
        super(PortalConnection, self).__init__()
        self._authentication_handler = \
            _QueryStringAuthenticationHandler(authentication_key)
        self._change_source = change_source
        # One session for the connection's lifetime, with retries on every
        # URL (the '' prefix matches all of them).
        session = Session()
        session.headers['User-Agent'] = _USER_AGENT
        session.mount('', HTTPAdapter(max_retries=_HTTP_CONNECTION_MAX_RETRIES))
        self._session = session

    def send_get_request(self, url_path, query_string_args=None):
        """GET *url_path* with optional *query_string_args*; return the
        deserialized JSON body HubSpot responded with.
        """
        return self._send_request('GET', url_path, query_string_args)

    def send_post_request(self, url_path, body_deserialization):
        """POST *body_deserialization* (JSON-serialized) to *url_path*;
        return the deserialized JSON response body.
        """
        return self._send_request(
            'POST',
            url_path,
            body_deserialization=body_deserialization,
        )

    def send_put_request(self, url_path, body_deserialization):
        """PUT *body_deserialization* (JSON-serialized) to *url_path*;
        return the deserialized JSON response body.
        """
        return self._send_request(
            'PUT',
            url_path,
            body_deserialization=body_deserialization,
        )

    def send_delete_request(self, url_path):
        """DELETE *url_path*; return the deserialized JSON response body."""
        return self._send_request('DELETE', url_path)

    def _send_request(
        self,
        method,
        url_path,
        query_string_args=None,
        body_deserialization=None,
    ):
        """Issue the request and deserialize HubSpot's response."""
        full_url = self._API_URL + url_path
        # Every request carries the change source as auditId.
        params = dict(query_string_args or {}, auditId=self._change_source)
        if body_deserialization:
            request_headers = {'content-type': 'application/json'}
            request_body = json_serialize(body_deserialization)
        else:
            request_headers = {}
            request_body = None
        response = self._session.request(
            method,
            full_url,
            params=params,
            auth=self._authentication_handler,
            data=request_body,
            headers=request_headers,
        )
        return self._deserialize_response_body(response)

    @classmethod
    def _deserialize_response_body(cls, response):
        """Validate the response and return its decoded JSON body
        (None for statuses that carry no body).
        """
        cls._require_successful_response(response)
        cls._require_json_response(response)
        status = response.status_code
        if status == HTTP_STATUS_OK:
            return response.json()
        if status in _HTTP_STATUS_CODES_WITH_EMPTY_BODIES:
            return None
        raise HubspotUnsupportedResponseError(
            'Unsupported response status {}'.format(status))

    @staticmethod
    def _require_successful_response(response):
        """Raise the matching Hubspot exception for 4xx/5xx statuses."""
        status = response.status_code
        if 400 <= status < 500:
            error_data = _HUBSPOT_ERROR_RESPONSE_SCHEMA(response.json())
            if status == HTTP_STATUS_UNAUTHORIZED:
                exception_class = HubspotAuthenticationError
            else:
                exception_class = HubspotClientError
            raise exception_class(
                error_data['message'],
                error_data['requestId'],
            )
        elif 500 <= status < 600:
            raise HubspotServerError(response.reason, status)

    @staticmethod
    def _require_json_response(response):
        """Raise unless the response declares an application/json body."""
        content_type_header_value = response.headers.get('Content-Type')
        if not content_type_header_value:
            raise HubspotUnsupportedResponseError(
                'Response does not specify a Content-Type')
        content_type = content_type_header_value.split(';')[0].lower()
        if content_type != 'application/json':
            raise HubspotUnsupportedResponseError(
                'Unsupported response content type {}'.format(content_type))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._session.close()
def cmd_search_word(term):
    """Searches word translations at the http://slovari.yandex.ru.

    This command requires `simplejson` module to be installed.
    """
    import simplejson
    # Bottle template rendered by the caller; `variants` is supplied below.
    template = """
    <ul>
    %for v in variants:
        <li><a href="/?s=save_word+{{ v['en'].replace(' ', '+') }}%3B+{{ v['ru'].replace(' ', '+').replace(',', '%2C') }}">{{ v['en'] }}</a>
        %if v['transcript']:
            ({{ v['transcript'] }})
        %end
        %if v['has_audio']:
            <object type="application/x-shockwave-flash" data="http://audio.lingvo.yandex.net/swf/lingvo/lingvo-player.swf" width="27" height="27" style="visibility: visible;">
                <param name="allowscriptaccess" value="always">
                <param name="wmode" value="transparent">
                <param name="flashvars" value="color=0xFFFFFF&size=27&counter-path=slovari&count=yes&service-url=http://audio.lingvo.yandex.net&download-url-prefix=sounds&timestamp-url-prefix=timestamp.xml&language=SoundEn&sound-file={{ v['en'] }}.mp3">
            </object>
        %end
        — {{ v['ru'] }}</li>
    %end
    </ul>
    %rebase layout title='Word translation'
    """
    variants = {}
    internet = Session()
    # Query the suggest API with progressively shorter prefixes of *term*
    # (from the full term down to roughly half its length) until enough
    # variants are collected. NOTE(review): Python 2 code — `/` here is
    # integer division and `iteritems` is used below.
    for i in reversed(range((len(term) + 1) / 2, len(term) + 1)):
        url = 'http://suggest-slovari.yandex.ru/suggest-lingvo?v=2&lang=en&' + \
            urllib.urlencode(dict(part=term[:i].encode('utf-8')))
        response = internet.get(url)
        data = simplejson.loads(response.content)
        if data[0]:
            # Presumably data[1] holds "en - ru" pairs and data[2] links —
            # TODO confirm against the suggest API response format.
            for trans, link in zip(*data[1:]):
                en, ru = trans.split(' - ', 1)
                variants[en] = dict(en=en, ru=ru, link=link)
            if len(variants) > 5:
                break

    def get_spelling(value):
        # Scrape lingvo.yandex.ru for the transcription and an audio flag,
        # mutating and returning *value*.
        url = 'http://lingvo.yandex.ru/' + force_str(value['en']).replace(' ', '%20') + '/%D1%81%20%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%BE%D0%B3%D0%BE/'
        data = internet.get(url).content
        xml = ET.fromstring(force_str(data))
        transcript = xml.find('*//{x}span[@class="b-translate__tr"]'.format(x=xhtml))
        if transcript is None:
            value['transcript'] = ''
        else:
            value['transcript'] = transcript.text
        has_audio = xml.find('*//{x}h1[@class="b-translate__word"]//{x}span[@class="b-audio g-js"]'.format(x=xhtml))
        value['has_audio'] = has_audio is not None
        return value

    # Enrich every variant with spelling/audio info before rendering.
    variants = dict((key, get_spelling(value)) for key, value in variants.iteritems())
    return dict(template=template, variants=sorted(variants.values()))
def __init__(self):
    """Create the HTTP session with the module-level default headers."""
    self.session = Session()
    # Merge into the session's existing CaseInsensitiveDict instead of
    # replacing it: plain-dict assignment discards requests' default
    # headers and case-insensitive lookup. Matches the .update() pattern
    # used by the other clients in this file.
    self.session.headers.update(headers)