def test_session_persistent_params(self):
    """Session-level params persist across requests and merge with per-request params."""
    base_params = {'a': 'a_test'}
    s = Session()
    s.params = base_params

    # First request: the session params alone must be sent.
    r1 = s.get(httpbin('get'))
    assert base_params['a'] in r1.content

    # Second request: per-request params merge with the session params.
    extra = {'b': 'b_test'}
    r2 = s.get(httpbin('get'), params=extra)
    assert base_params['a'] in r2.content
    assert extra['b'] in r2.content

    # Third request: a None value removes the session param 'a'.
    override = {'b': 'b_test', 'a': None, 'c': 'c_test'}
    r3 = s.get(httpbin('get'), params=override)
    assert base_params['a'] not in r3.content
    assert override['b'] in r3.content
    assert override['c'] in r3.content
def get_submit_results(contest: str, session: Session) -> Dict[str, List[str]]:
    """Collect AtCoder submission statuses for the current user.

    Walks every page of the "my submissions" list and groups the judge
    statuses by problem label (e.g. 'a', 'b', ...).

    :param contest: contest slug, e.g. 'abc123'
    :param session: logged-in requests session
    :return: mapping of problem label -> list of statuses, newest ordering
        as rendered by the site; empty dict when no submissions exist
    """
    result_url = \
        f'{ATCODER_URL}/contests/{contest}/submissions/me?orderBy=created'
    res = session.get(f'{result_url}&page=1')
    bs = BeautifulSoup(res.text, "html.parser")
    pagination = bs.find('ul', class_='pagination')
    # Fix: previously `.findAll` was called on the result of `find` directly,
    # which raised AttributeError when the pagination list was absent
    # (not logged in / no submissions). Guard the None case explicitly.
    if pagination is None:
        return {}
    pages = pagination.findAll('li')
    if not pages:
        return {}
    max_page = max(map(lambda x: int(x.a.string), pages))
    results = {}
    for i in range(1, max_page + 1):
        if i > 1:
            # Page 1 was already fetched above; only re-fetch for later pages.
            res = session.get(f'{result_url}&page={i}')
            bs = BeautifulSoup(res.text, "html.parser")
        rows = bs.find('tbody').findAll('tr')
        for tr in rows:
            tds = tr.findAll('td')
            # Problem label is the suffix of the task URL (…/abc123_a -> 'a').
            prob = tds[1].a.get('href').split('_')[-1].lower()
            status = tds[6].span.string
            if prob not in results:
                results[prob] = []
            results[prob].append(status)
    return results
def main(args):
    """Download subtitle text files for every video linked from args.target.

    :param args: parsed CLI namespace providing `directory`, `cookie`
        and `target`
    """
    directory = Path(args.directory)
    directory.mkdir(exist_ok=True, parents=True)
    session = Session()
    session.headers.update({
        'Cookie': args.cookie,
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
    })
    response = session.get(args.target)
    if response.status_code != 200:
        print('[ERROR] Get basic url failed!')
        return
    data = response.text
    video_list = list(map(lambda x: urljoin(BASE_URL, x), get_list(data)))
    for url in tqdm(video_list):
        data = session.get(url).text
        # Page title before the first '|' is used as the subtitle filename.
        title = re.search('<title>(.*?)</title>', data,
                          re.DOTALL).group(1).split('|')[0].strip()
        try:
            subtitle_url = parser_subtitle_url(data)
        except AttributeError:
            # it means it's not a video
            continue
        try:
            subtitle = session.get(subtitle_url).json()['text']
            # Fix: write via Path.write_text so the file handle is closed
            # deterministically (the original leaked one handle per subtitle).
            directory.joinpath(f'{title}.txt').write_text('\n'.join(subtitle))
        except Exception:
            # Fix: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; Exception keeps the best-effort behaviour intact.
            # Generally speaking, it means a video without subtitle.
            print(f'[INFO] ignore url {url}')
def init_session_id(login: str, password: str, cluster_host: str) -> None:
    """
    Initialize Session ID value from a Cookie after authentication.

    :param login: Login of a Static Dex user
    :type login: str
    :param password: Password of a Static Dex user
    :type password: str
    :param cluster_host: Base Host of a cluster
    :type cluster_host: str
    :return: None
    """
    global _session_cookies, _jenkins_credentials
    session = Session()
    for auth_endpoint_url in AUTH_ENDPOINT_URLS:
        response = session.get(auth_endpoint_url.format(cluster_host))
        if response.status_code != 200:
            raise IOError(
                'Authentication endpoint is unavailable, got {} http code'.
                format(response.status_code))
        if response.url.startswith(AUTHENTICATION_HOSTNAME.format(
                cluster_host)):
            # if auth form is opened: pull the request id out of the page
            # and submit the credentials to the auth path.
            match = re.search(REQUEST_ID_REGEXP, response.text)
            if match:
                request_id = match.group(1)
            else:
                raise ValueError('Request ID was not found on page')
            url = AUTHENTICATION_PATH.format(cluster_host, request_id)
            data = {PARAM_NAME_LOGIN: login, PARAM_NAME_PASSWORD: password}
            response = session.post(url, data)
            if response.status_code != 200:
                # Fix: the message had 4 placeholders for 5 format arguments,
                # so the response body was silently dropped from the error.
                raise IOError(
                    'Unable to authorise, got {} http code from {} for the '
                    'query to {} with data {}, response {}'.format(
                        response.status_code,
                        auth_endpoint_url.format(cluster_host),
                        url, data, response.text))
        # Harvest any session-id cookies accumulated so far.
        for cookie_name in session.cookies.keys():
            if cookie_name.startswith(SESSION_ID_COOKIE_NAMES):
                _session_cookies[cookie_name] = session.cookies.get(
                    cookie_name)
    if len(_session_cookies) == 0:
        raise ValueError('Cant find any session ID in Cookies')
    # Best effort: also scrape the Jenkins API token from the profile page.
    response = session.get(JENKINS_PROFILE_URL.format(cluster_host, login))
    if response.status_code == 200:
        regex_output = JENKINS_API_TOKEN_REGEX.search(response.text)
        if regex_output:
            _jenkins_credentials = (login, regex_output.group(1))
def test_session_persistent_headers(self):
    """A Session-level header must be sent on every request the Session makes."""
    s = Session()
    s.headers = {'User-agent': 'Mozilla/5.0'}
    # Two consecutive requests; each must echo the persistent header back.
    first = s.get(httpbin('user-agent'))
    assert 'Mozilla/5.0' in first.content
    second = s.get(httpbin('user-agent'))
    assert 'Mozilla/5.0' in second.content
    self.assertEqual(second.status_code, 200)
def test_session_persistent_headers(self):
    """Headers assigned to a Session persist across consecutive requests."""
    expected_agent = 'Mozilla/5.0'
    s = Session()
    s.headers = {'User-agent': expected_agent}
    # Issue two requests through the same Session; both must carry the header.
    for _ in range(2):
        r = s.get(httpbin('user-agent'))
        assert expected_agent in r.content
    self.assertEqual(r.status_code, 200)
def get_problems(contest: str, session: Session) -> List[str]:
    """Return the lower-cased problem labels of an AtCoder contest.

    Prefers the tasks page; when it is unavailable (response falsy on
    4xx/5xx) falls back to the contest top page, whose table has plain
    text cells instead of anchors.
    """
    res = session.get(f'{ATCODER_URL}/contests/{contest}/tasks')
    tasks_unavailable = not res
    if tasks_unavailable:
        res = session.get(f'{ATCODER_URL}/contests/{contest}')
    bs = BeautifulSoup(res.text, "html.parser")
    rows = bs.find('tbody').findAll("tr")
    if tasks_unavailable:
        # Fallback page: first cell holds the label as bare text.
        return [row.findAll('td')[0].string.lower() for row in rows]
    # Tasks page: first cell wraps the label in an anchor.
    return [row.findAll('td')[0].a.string.lower() for row in rows]
class Test_Api(object):
    """Ad-hoc manual test driver for the users REST API (Python 2 code).

    NOTE(review): relies on module-level globals `row_data`, `url_path`,
    `data` and `expected_resp` defined elsewhere in this file — confirm
    they exist before running.
    """

    def __init__(self):
        # Base endpoint, e.g. 'http://127.0.0.1:5000/api/users/'.
        self.base_url = row_data[2]  # 'http://127.0.0.1:5000/api/users/'
        # One Session so cookies persist between the calls below.
        self.session = Session()

    def test_create_user(self):
        # POST the prepared payload and compare against the expected
        # response; assertion failures are printed, not raised.
        url = urljoin(self.base_url, url_path)
        response = self.session.post(url, data)  #.json()
        # response = requests.post(url, data)
        resp_json = response.json()
        print response
        print resp_json
        print type(resp_json)
        try:
            assert resp_json == dict(
                expected_resp) and response.status_code == 200
        except AssertionError:
            print traceback.print_exc()
        # response = requests.post(url, data)
        # print response.json()
        # print response.content
        # print response.cookies
        # print response.headers
        # print response.status_code
        # print response.text
        # print self.session.headers
        # print self.session.cookies
        # print self.session.params

    def get_user(self):
        # Fetch user '002' and dump the response in several forms.
        url = urljoin(self.base_url, '002')
        response = self.session.get(url)
        # print response
        print response.text
        print response.json()
        print response.content

    def update_user(self):
        # POST (no payload) against user '001' and print the JSON reply.
        url = urljoin(self.base_url, '001')
        resp = self.session.post(url)
        print resp.json()

    def get_users(self):
        # List all users from the hard-coded endpoint.
        url = 'http://127.0.0.1:5000/api/users'
        response = self.session.get(url)
        print response.content
class HttpRequest:
    """Thin wrapper around requests.Session for logged GET/POST requests."""

    def __init__(self):
        self.session = Session()

    def __del__(self):
        # Best-effort cleanup; callers should prefer close() explicitly.
        self.session.close()

    def request(self, method, url, data=None, params=None, json=None,
                headers=None, cookies=None, timeout=None):
        """Send an HTTP request and return the response body text.

        :param method: 'get' or 'post' (case-insensitive)
        :param json: when truthy on POST, sent as a JSON body instead of
            form `data`
        :raises ValueError: for any unsupported HTTP method
        :raises RuntimeError: when the server answers 404
        """
        method = method.lower()
        if method == 'get':
            my_log.info(f'Sending {method}:{url} {params}')
            res = self.session.get(url=url, params=params, headers=headers,
                                   cookies=cookies, timeout=timeout)
        elif method == 'post':
            if json:
                my_log.info(f'Sending {method}:{url} {json}')
                res = self.session.post(url=url, json=json, headers=headers,
                                        cookies=cookies, timeout=timeout)
            else:
                my_log.info(f'Sending {method}:{url} {data}')
                res = self.session.post(url=url, data=data, headers=headers,
                                        cookies=cookies, timeout=timeout)
        else:
            # Fix: the original set res = None and then crashed with an
            # opaque AttributeError on res.status_code; fail fast instead.
            raise ValueError(f'Unsupported HTTP method: {method}')
        if res.status_code == 404:
            my_log.error(f'404 not found!')
            # Fix: attach a message instead of raising a bare RuntimeError.
            raise RuntimeError(f'404 not found: {url}')
        return res.text

    def close(self):
        self.session.close()
def LoadLeagues(url: str, sess:Session, timeout=5) -> (list, dict):
    """Scrape the leagues listing page into (leagues, leaguesPathes).

    `leagues` is the list of League objects that passed checkLeague();
    `leaguesPathes` maps each such League to its relative link.

    NOTE(review): `sess.get` returns a (response, error) tuple here, so
    `sess` is a project wrapper rather than a plain requests.Session —
    confirm against its definition.
    """
    leagues = list()
    leaguesPathes = dict()
    r, err = sess.get(url, verify=False, timeout=timeout)
    if err:
        # On transport error return the (empty) results unchanged.
        return leagues, leaguesPathes
    doc = html.fromstring(r.text)
    items = doc.xpath(".//ul[@class=\"countries\"]/li")
    for item in items:
        # A <li> carrying a title attribute is a country header; it sets
        # the country for the league rows that follow it.
        if "title" in item.attrib:
            country = item.attrib["title"]
            continue
        anchor = item.xpath("./a")
        assert(len(anchor) == 1)
        assert("href" in anchor[0].attrib)
        anchor = anchor[0]
        leagueName = anchor.text
        leagueLink = anchor.attrib["href"]
        league = League(leagueName, country)
        if checkLeague(league):  # TODO Remove this check with something appropriate
            leaguesPathes[league] = leagueLink
            leagues.append(league)
    return leagues, leaguesPathes
def update(title, tags, desc):
    """Log in to AnkiWeb and upload a new version of the add-on.

    :param title: add-on title shown on AnkiWeb
    :param tags: space-separated tag list
    :param desc: add-on description
    :return: True when AnkiWeb redirects to the add-on info page
        (upload accepted), False otherwise
    """
    s = Session()
    login_url = 'https://ankiweb.net/account/login'
    rsp = s.get(login_url)
    soup = BeautifulSoup(rsp.text, features="html.parser")
    # The login form requires its CSRF token to be echoed back.
    csrf_token = soup.find('input', {'name': 'csrf_token'}).get('value')
    s.post(login_url, data={
        'submit': 1,
        'csrf_token': csrf_token,
        'username': username,
        'password': password
    })
    upload_url = 'https://ankiweb.net/shared/upload'
    # Fix: open the zip inside a `with` so the handle is always closed
    # (the original leaked an open file object).
    with open(f'{MODEL_NAME}.zip', 'rb') as zip_file:
        file = {'v21file': zip_file}
        rsp = s.post(upload_url, files=file, data={
            'title': title,
            'tags': tags,
            'desc': desc,
            'id': addon_id,
            'submit': 'Update',
            'v21file': file,
            'v20file': '',
        })
    # Fix: collapse `if cond: return True else: return False` to the
    # boolean expression itself.
    return rsp.url == f'https://ankiweb.net/shared/info/{addon_id}'
class HTTPRequest2(object):
    """Keeps cookie state on a shared Session for reuse by later requests."""

    def __init__(self):
        # Create the session object (carries cookies across calls).
        self.session = Session()

    def request(self, method, url, params=None, data=None, headers=None,
                cookies=None, json=None):
        """Dispatch a GET or POST through the shared session.

        NOTE(review): any method other than get/post silently returns
        None — confirm whether that is intended or should raise.
        """
        method = method.lower()
        if method == "post":
            # Decide whether to send the payload as a JSON body (for APIs
            # that take JSON) or as ordinary form data.
            if json:
                logging.info("正在发送请求,请求地址:{}, 请求参数:{}".format(url, json))
                return self.session.post(url=url, json=json, headers=headers,
                                         cookies=cookies)
            else:
                logging.info("正在发送请求,请求地址:{}, 请求参数:{}".format(url, data))
                return self.session.post(url=url, data=data, headers=headers,
                                         cookies=cookies)
        elif method == "get":
            logging.info("正在发送请求,请求地址:{}, 请求参数:{}".format(url, params))
            return self.session.get(url=url, params=params, headers=headers,
                                    cookies=cookies)

    def close(self):
        # Release the session's connection pool.
        self.session.close()
def fetch_data(election_id):
    """Download the Illinois elections candidate data file for one election.

    Performs the usual ASP.NET dance: GET the page, lift the __VIEWSTATE
    and __EVENTVALIDATION tokens, then replay them in the POST that
    triggers the download. Returns the content, or None on failure.
    """
    session = Session()
    url = 'http://www.elections.state.il.us/ElectionInformation/CandDataFile.aspx?id=%s' % election_id

    landing = session.get(url)
    soup = BeautifulSoup(landing.content)
    form_fields = {
        '__EVENTTARGET': 'ctl00$ContentPlaceHolder1$btnText',
        'ctl00$pnlMenu_CollapsiblePanelExtender_ClientState': 'true',
        'ctl00$AccordionStateBoardMenu_AccordionExtender_ClientState': '0',
        'ctl00$mtbSearch': '',
        'ctl00$AccordionPaneStateBoardMenu_content$AccordionMainContent_AccordionExtender_ClientState': '-1',
        'hiddenInputToUpdateATBuffer_CommonToolkitScripts': '1',
        '__EVENTARGUMENT': '',
        '__VIEWSTATE': soup.find('input', attrs={'id': '__VIEWSTATE'}).get('value'),
        '__EVENTVALIDATION': soup.find('input', attrs={'id': '__EVENTVALIDATION'}).get('value'),
    }

    download = session.post(url, data=form_fields)
    return download.content if download.status_code == 200 else None
def login(session: Session) -> Tuple[bool, Response]:
    """Authenticate against the Infor portal.

    :param session: session to authenticate (mutated: a Cookie header is set)
    :return: (success, last_response)
    """
    # GET infor page
    r = session.get(infor_login_url)
    if r.status_code != 200:
        return (False, r)
    page = BeautifulSoup(r.text, 'html.parser')
    # Copy the first cookie from the response into the session's default
    # headers so subsequent requests carry it.
    cookie = r.headers["Set-Cookie"].split()[0]
    # TODO the cookie is set in the request itself
    session.headers.update({"Cookie": cookie})
    # The login form's action attribute gives the POST target path.
    login_form = page.find(id="loginFormBean")
    action = login_form["action"]
    login_url = infor_base_url + action
    # POST login attempt
    form_data = {
        "username": config.defaults()['username'],
        "password": config.defaults()['password']
    }
    r = session.post(login_url, data=form_data)
    if r.status_code != 200:
        return (False, r)
    # Check if correctly authenticated: the portal reports credential
    # errors inside this div rather than via the HTTP status code.
    page = BeautifulSoup(r.text, 'html.parser')
    errors_div = page.find(id="div_erros_preenchimento_formulario")
    if errors_div is not None:
        error_text = errors_div.div.ul.li.text
        logging.info(error_text)
        return (False, r)
    return (True, r)
async def markfinish(app, s: Session, group, member, msg: str):
    """Mark the Canvas planner item whose title matches the second word of
    `msg` as complete, replying to the chat group with the outcome.

    NOTE(review): when `msg` has no title (len == 1) the error reply is
    sent but execution still falls through to `text[1]`, which raises
    IndexError — looks like a missing `return`. The for/else "not found"
    branch likewise continues into the POST below — confirm intent.
    """
    text = msg.split(' ')
    if len(text) == 1:
        await app.sendGroupMessage(group, MessageChain.create([Plain('请输入标题!')]))
    elif len(text) != 2:
        return
    event_title = text[1]
    # Fetch today's planner items; Canvas prefixes its JSON with
    # "while(1);" which must be stripped before parsing.
    url = 'http://canvas.tongji.edu.cn/api/v1/planner/items?per_page=50&start_date=' + \
        datetime.now().strftime('%Y-%m-%d')
    r = s.get(url)
    data = json.loads(r.text.replace('while(1);', ''))
    plannable_id = ''
    event_id = None
    for i in data:
        # Match user-scoped items by exact title; an existing
        # planner_override means the item was marked before.
        if i['context_type'] == 'User' and i['plannable'][
                'title'] == event_title:
            plannable_id = i['plannable_id']
            if i['planner_override'] != None:
                event_id = i['planner_override']['id']
            break
    else:
        # Loop finished without break: no item matched the title.
        await app.sendGroupMessage(
            group, MessageChain.create([Plain('查无此事件,请检查标题是否输入正确')]))
    data = {
        'id': event_id,
        'marked_complete': True,
        'plannable_id': plannable_id,
        'plannable_type': 'calendar_event',
        'user_id': get_id(member.id),
        # CSRF token lives URL-encoded in the session cookie jar.
        'authenticity_token': parse.unquote(
            requests.utils.dict_from_cookiejar(s.cookies)['_csrf_token'])
    }
    if event_id == None:
        # No override yet: create one via POST.
        url = 'http://canvas.tongji.edu.cn/api/v1/planner/overrides'
        r = s.post(url, data=data)
    else:
        # Existing override: update it via PUT.
        url = 'http://canvas.tongji.edu.cn/api/v1/planner/overrides/' + \
            str(event_id)
        r = s.put(url, data=data)
    if not is_json(r.text) or 'errors' in r.json():
        await app.sendGroupMessage(group, MessageChain.create([Plain('标记为完成失败!')]))
    else:
        await app.sendGroupMessage(group, MessageChain.create([Plain('标记为完成成功!')]))
def get_mainframe_request_key(self, s: Session, syorui_kanri_no):
    """Return the GET-parameter key used to fetch headers, read from the
    viewFrame src of the document-viewer frame's initialisation HTML."""
    page = s.get(
        EdinetUrl.frame_initialize_url.value.format(syorui_kanri_no),
        timeout=10, headers=self.user_agent, verify=False)
    soup = BeautifulSoup(page.text, "lxml")
    view_frame = soup.select("[name=viewFrame]")[0]
    return view_frame.get('src')
def test_should_return_the_hello_message(client: Session):
    """GET / responds 200 with the API greeting payload."""
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"message": "Hello from drivr's API."}
def get(self, url, **kwargs):
    """GET `url` resolved against the service base, filling in the
    default timeout when the caller did not provide one."""
    resolved_uri = self._get_resource_uri(url)
    call_kwargs = self._set_default_timeout(**kwargs)
    return Session.get(self, resolved_uri, **call_kwargs)
class BdXhsApi(object):
    """Client for the Xiaohongshu 'burdock' Baidu API."""

    def __init__(self, host=XHS_BASE_HOST, prefix="fe_api/burdock/baidu/v2/",
                 scheme="https"):
        self._host = host
        self._prefix = prefix
        self._base_url = "{}://{}/{}".format(scheme, host, prefix)
        self._session = Session()
        # Random User-Agent to look less like a bot.
        ui = random.randint(0, len(USER_AGENTS) - 1)
        headers = {
            "User-Agent": USER_AGENTS[ui],
            "Host": self._host,
            "Connection": "close",
            "Content-Type": "application/json",
            "Accept-Language": "zh-cn"
        }
        self._session.headers.update(headers)
        self._session.keep_alive = False

    def _get(self, route, sign_code, retry=1):
        """GET `route` with the X-Sign header.

        Returns the payload's "data" on API code 0, None on connection
        errors, retries once otherwise, then raises.
        """
        url = urljoin(self._base_url, route)
        headers = {"X-Sign": "X{}".format(sign_code)}
        try:
            ret = self._session.get(url, headers=headers, timeout=(2, 5))
            LOG.info("GET {} {}".format(ret.url, ret.status_code))
        except (requests.exceptions.ConnectTimeout,
                requests.exceptions.ConnectionError) as e:
            LOG.error(e)
            return None
        error = ret.text
        # Fix: removed a leftover `import pdb; pdb.set_trace()` that
        # suspended every single request on an interactive debugger.
        if 200 <= ret.status_code < 300:
            data = ret.json()
            code = data.get("code")
            if code == 0:
                return data["data"]
        if retry:
            return self._get(route, sign_code, retry - 1)
        raise Exception('Error url:{} -{}'.format(url, error))

    def get_homefeed_categories(self):
        """Fetch the home-feed category list."""
        route = "homefeed/categories"
        sign_code = "144fc3e8f0039f6fdf80d42152805e2d"
        result = self._get(route, sign_code=sign_code)
        return result

    def get_search_tending(self):
        """Fetch the trending-search list."""
        route = "search/trending"
        sign_code = "1d2bce3df0a71e0b906c0ef74a7637ab"
        result = self._get(route, sign_code=sign_code)
        return result

    def close(self):
        return self._session.close()
def get_current_user(session: Session) -> str:
    """Return the username of the logged-in AtCoder user, or '' when the
    session is anonymous.

    The /quit page answers 200 only for an authenticated session (with
    redirects disabled an anonymous visitor gets a redirect instead).
    """
    res = session.get(f'{ATCODER_URL}/quit', allow_redirects=False)
    if res.status_code != 200:
        return ''
    bs = BeautifulSoup(res.text, "html.parser")
    username_label = bs.find('label', text=re.compile('(Username|ユーザ名)'))
    return username_label.parent.find('input')['value']
def ghibli_request(api: Api, session: Session) -> Tuple[str, List]:
    """Fetch one Ghibli API endpoint.

    Any failure (transport, bad status, bad JSON) is wrapped in
    GhibliRequestError carrying the URL and original exception.
    """
    try:
        with session.get(api.value, timeout=REQUESTS_TIMEOUT) as response:
            payload = response.json()
            response.raise_for_status()
    except Exception as e:
        raise GhibliRequestError(
            f'Fetch data error. Url: "{api.value}". Exc: {e}')
    return api.name, payload
def get_list(session: Session, **params) -> Response:
    """GET the engine-api user list, optionally filtered by tenant.

    :param params: optional `memberOfTenant` filter value
    :return: the raw engine-api Response
    """
    url = URL_ENGINE_API + '/user'
    query = {}
    param_member_of_tenant = params.get('memberOfTenant')
    if param_member_of_tenant is not None:
        # Fix: pass the filter via `params` so requests URL-encodes the
        # value; the old string concatenation broke on reserved characters.
        query['memberOfTenant'] = param_member_of_tenant
    app.logger.debug("UserApi#get_list: {}".format(url))
    return session.get(url=url, params=query)
def get_list(session: Session, **params) -> Response:
    """GET the engine-api tenant list, optionally filtered by member user.

    :param params: optional `userMember` filter value
    :return: the raw engine-api Response
    """
    url = URL_ENGINE_API + '/tenant'
    query = {}
    param_user_member = params.get('userMember')
    if param_user_member is not None:
        # Fix: pass the filter via `params` so requests URL-encodes the
        # value; the old string concatenation broke on reserved characters.
        query['userMember'] = param_user_member
    app.logger.debug("TenantApi#get_list: {}".format(url))
    return session.get(url=url, params=query)
def get_breaking_bad_wikipedia_character_info(character: str,
                                              session: Session) -> str:
    """Return the first five paragraphs of a character's Wikipedia article,
    joined with newlines."""
    response = session.get(f"{BREAKING_BAD_WIKIPEDIA_URL}/{character}")
    soup = BeautifulSoup(response.text, features="html.parser")
    first_paragraphs = soup.select("p")[:5]
    return "\n".join(p.text for p in first_paragraphs)
def goto_login_page(response: Response, session: Session) -> Response:
    """Go to the (Concordia) Netname login page. Return the response page."""
    soup = BeautifulSoup(response.content, "lxml")
    # The "Click here to log in using your Netname" button's URL.
    login_anchor = soup.select_one(
        "a[href^='https://moodle.concordia.ca/moodle/auth/saml2/login.php']"
    )
    return session.get(login_anchor.get("href"), timeout=3)
def is_session(s: Session, member_id: int) -> Session:
    """Check that the Canvas session is still authenticated.

    Probes the planner API; when the session has expired, closes it,
    re-logs-in via createlink() and returns the fresh session. Otherwise
    returns the session passed in.
    """
    url = 'http://canvas.tongji.edu.cn/api/v1/planner/items?per_page=50&start_date=' + \
        datetime.now().strftime('%Y-%m-%d')
    r = s.get(url)
    expired = r.status_code == 401
    if not expired:
        try:
            data = json.loads(r.text.replace('while(1);', ''))
            expired = 'error' in data
        except ValueError:
            # Fix: the original parsed the body before checking the status,
            # so a non-JSON error page (e.g. HTML) crashed json.loads.
            # Treat an unparseable body as an expired session.
            expired = True
    if expired:
        print("检测到canvas登录状态丢失,现尝试重新登录canvas")
        s.close()
        s = createlink(member_id)
    return s
def get_all_tasks_in_list(s: Session, todolist_id: str) -> List[TodoTask]:
    """Fetch every task in a Microsoft To Do list, paging 10 at a time
    via the Graph `$skip` parameter."""
    collected = list()
    page = 0
    while True:
        endpoint = GRAPH_URL + f'/me/todo/lists/{todolist_id}/tasks?$skip={10*page}'
        batch = s.get(endpoint).json()['value']
        collected.extend(TodoTask.from_dict(item) for item in batch)
        page += 1
        # A short page means we have reached the end of the list.
        if len(batch) < 10:
            return collected
def get_idinfo_from_access_token(access_token: str) -> dict:
    """Fetches user information using the access token provided.

    Raises ValueError if an error occurs, including the user not being
    authorized.
    """
    # Fix: use the Session as a context manager so its connection pool
    # is released (the original never closed it).
    with Session() as session:
        r = session.get(USERINFO_ENDPOINT,
                        headers={'Authorization': 'Bearer ' + access_token})
    if r.status_code != 200:
        raise ValueError("Unexpected response code %d" % r.status_code)
    return r.json()
def _run_request(self, request):
    """
    Executes HTTP GET request with timeout using the endpoint defined
    upon client creation.

    :raises requests.HTTPError: when the response status is an error
    """
    # Fix: close the per-call Session (it previously leaked a connection
    # pool on every request). One adapter serves both schemes.
    with Session() as session:
        adapter = HTTPAdapter(max_retries=self._TOTAL_RETRIES)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        result = session.get(self.endpoint + "?" + request,
                             headers=self._get_custom_headers(),
                             timeout=self.timeout)
    result.raise_for_status()
    return result
class BaseRequest(object):
    """requests wrapper that translates transport errors into the
    Vendor* exception hierarchy."""

    def __init__(self, session=None):
        if not isinstance(session, Session):
            self.session = Session()
            # Fix: the second mount registered 'http://' twice and left
            # HTTPS on the default adapter; mount both schemes instead.
            self.session.mount('http://',
                               HTTPAdapter(max_retries=1, pool_maxsize=50))
            self.session.mount('https://',
                               HTTPAdapter(max_retries=1, pool_maxsize=50))
        else:
            self.session = session

    @set_default
    def get(self, url, params, timeout=1, callback=None, **kwargs):
        """GET with exception translation; calls `callback(response)` if given."""
        with self.catch_exception():
            r = self._get_result(url, params, timeout, **kwargs)
            if callable(callback):
                callback(r)
            return r

    @set_default
    def post(self, url, data, timeout=1, callback=None, **kwargs):
        """POST with exception translation; calls `callback(response)` if given."""
        with self.catch_exception():
            r = self._post_result(url, data, timeout, **kwargs)
            if callable(callback):
                callback(r)
            return r

    def _get_result(self, url, params, timeout, **kwargs):
        # Raise HTTPError on 4xx/5xx so catch_exception can translate it.
        r = self.session.get(url, params=params, timeout=timeout, **kwargs)
        r.raise_for_status()
        return r

    def _post_result(self, url, data, timeout, **kwargs):
        r = self.session.post(url, data, timeout=timeout, **kwargs)
        r.raise_for_status()
        return r

    @contextmanager
    def catch_exception(self):
        """Map requests exceptions onto the vendor exception types."""
        try:
            yield
        except(ConnectionError, Timeout) as err:
            raise VendorConnectionError(str(err), data=err)
        except HTTPError as err:
            raise VendorHTTPError(str(err), data=err)
        except RequestException as err:
            raise VendorRequestError(str(err), data=err)
class HttpClient:
    """High-reliability HTTP client built on the requests library.

    :param max_connect_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server.
    :param max_request_tries: The maximum times of tries each request should
        attempt.
    """

    def __init__(self, max_connect_retries=0, max_request_tries=0):
        self.timeout = DEFAULT_TIMOUT
        self.max_connect_retries = (max_connect_retries
                                    or DEFAULT_CONNECT_RETRIES)
        self.max_request_tries = max_request_tries or DEFAULT_REQUEST_TRIES
        self.session = Session()
        # NOTE(review): this Retry hard-codes 2 everywhere and ignores
        # self.max_connect_retries — confirm whether that is intentional.
        retries = Retry(connect=2, read=2, status=2, redirect=2)
        self.session.mount('https://', HTTPAdapter(max_retries=retries))
        self.session.mount('http://', HTTPAdapter(max_retries=retries))

    def get(self, url, content_type='json', max_request_times=0, timeout=0):
        """GET `url` with application-level retries.

        Returns parsed JSON when content_type is 'json', otherwise raw
        text; None when every try failed or yielded an empty payload.
        """
        max_times = max_request_times or self.max_request_tries
        has_request_times = 0
        data = None
        while has_request_times < max_times:
            try:
                res = self.session.get(url, timeout=timeout or self.timeout)
                data = res.json() if content_type == 'json' else res.text
                if not data:
                    # An empty payload counts as a failed try.
                    has_request_times = has_request_times + 1
                    continue
                else:
                    break
            except requests.exceptions.ConnectionError as e:
                # Connection-level failure: give up immediately.
                print("socket连接错误或读取超时", e.__class__)
                break
            except Exception:
                # raise
                has_request_times = has_request_times + 1
                continue
        if not data:
            print("尝试了{}次请求依然失败".format(has_request_times + 1))
        else:
            print("尝试了{}次请求成功".format(has_request_times + 1))
        # NOTE(review): closing the shared session here means a second
        # get() reuses a closed session — confirm the client is single-use.
        self.session.close()
        return data

    def post(self, url):
        # Not implemented yet.
        pass
class DemoApi(object):
    """Demo API client that prints each request/response for inspection."""

    def __init__(self, base_url):
        self.base_url = base_url
        # Create a session instance so login cookies persist across calls.
        self.session = Session()

    def login(self, mobile, password):
        """Login endpoint.

        :param mobile: mobile number used as the account name
        :param password: account password
        :return: parsed JSON response body
        """
        url = urljoin(self.base_url, 'account/login')
        data = {'mobile': mobile, 'password': password}
        headers = {
            'content-type': 'application/json',
            'X-Token': 'mobile',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        }
        response = self.session.post(url, data=json.dumps(data),
                                     headers=headers).json()
        print('\n*****************************************')
        print(u'\n1、请求url: \n%s' % url)
        print(u'\n2、请求头信息:')
        pprint(self.session.headers)
        print(u'\n3、请求参数:')
        pprint(data)
        print(u'\n4、响应:')
        pprint(response)
        return response

    def info(self):
        """Account-details endpoint; relies on the cookies set by login()."""
        url = urljoin(self.base_url, 'account/my_assets')
        response = self.session.get(url).json()
        print('\n*****************************************')
        print(u'\n1、请求url: \n%s' % url)
        print(u'\n2、请求头信息:')
        pprint(self.session.headers)
        print(u'\n3、请求cookies:')
        pprint(dict(self.session.cookies))
        print(u'\n4、响应:')
        pprint(response)
        return response
def get_headerframe_bean_id(self, s: Session, syorui_kanri_no):
    """Extract the bean id from the document-viewer header frame HTML."""
    url = EdinetUrl.header_frame_url.value.format(syorui_kanri_no)
    response = s.get(url, timeout=10, headers=self.user_agent, verify=False)
    # The id is embedded in the single line that mentions "be.bean.id".
    matching_lines = [line for line in response.text.split('\r\n')
                      if line.find('"be.bean.id"') >= 0]
    return re.search('"be.bean.id", "(.+)"', matching_lines[0]).group(1)
def fetch_data(dl_type=None, **kwargs):
    """
    Fetch Receipts, Expenditures, and Committees. dl_type is one of those
    three choices. kwargs depend on the choice. Receipts and Expenditures
    need start_date and end_date for search. Committees need a name_start
    kwarg to pass into the search.

    Seems like the maximum that you can get is about 250,000 records at a
    time.
    """
    s = Session()
    # Static ASP.NET postback fields required by the download form.
    post_data = {
        '__EVENTTARGET': 'ctl00$ContentPlaceHolder1$btnText',
        'ctl00$pnlMenu_CollapsiblePanelExtender_ClientState': 'true',
        'ctl00$AccordionStateBoardMenu_AccordionExtender_ClientState': '0',
        'ctl00$mtbSearch': '',
        'ctl00$AccordionPaneStateBoardMenu_content$AccordionMainContent_AccordionExtender_ClientState': '-1',
        'hiddenInputToUpdateATBuffer_CommonToolkitScripts': '1',
        '__EVENTARGUMENT': '',
        '__VIEWSTATEGENERATOR': 'E8D1F59A'
    }
    # NOTE(review): the branches below mutate module-level *_GET_PARAMS
    # dicts in place, so filter values leak between calls — confirm intended.
    if dl_type == 'Receipts':
        CONT_GET_PARAMS['RcvDate'] = kwargs['start_date']
        CONT_GET_PARAMS['RcvDateThru'] = kwargs['end_date']
        url = '%s/DownloadList.aspx?%s' % (BASE_URL, urlencode(CONT_GET_PARAMS))
    elif dl_type == 'Committees':
        COMM_GET_PARAMS['Name'] = kwargs['name_start']
        url = '%s/DownloadList.aspx?%s' % (BASE_URL, urlencode(COMM_GET_PARAMS))
    elif dl_type == 'Expenditures':
        EXP_GET_PARAMS['ExpendedDate'] = kwargs['start_date']
        EXP_GET_PARAMS['ExpendedDateThru'] = kwargs['end_date']
        url = '%s/DownloadList.aspx?%s' % (BASE_URL, urlencode(EXP_GET_PARAMS))
    elif dl_type == 'Candidates':
        url = 'http://www.elections.state.il.us/ElectionInformation/CandDataFile.aspx?id=%s' % kwargs['election_id']
    # GET first to pick up the ASP.NET __VIEWSTATE/__EVENTVALIDATION tokens.
    g = s.get(url)
    if 'Unexpected errors occurred trying to populate page' in g.content:
        return None
    soup = BeautifulSoup(g.content)
    view_state = soup.find('input', attrs={'id': '__VIEWSTATE'}).get('value')
    event_val = soup.find('input', attrs={'id': '__EVENTVALIDATION'}).get('value')
    post_data['__VIEWSTATE'] = view_state
    post_data['__EVENTVALIDATION'] = event_val
    # Replay the tokens in the POST that triggers the actual download.
    dl_page = s.post(url, data=post_data)
    if dl_page.status_code == 200:
        return dl_page.content
    else:
        return None
class Api(object):
    """Minimal client for the Pinaple API."""

    API_URL = 'http://192.168.33.10'  # todo: add to config
    API_VERSION = 'v1'

    def __init__(self, device_key):
        self.base_url = '{0}/{1}/'.format(self.API_URL, self.API_VERSION)
        self.session = Session()
        self.session.auth = KeyAuth(device_key)
        self.session.headers.update({
            'Content-Type': 'application/json'
        })

    def request(self, method, url, **kwargs):
        """Constructs and sends a Request to the Pinaple API."""
        full_url = urljoin(self.base_url, url)
        if 'data' in kwargs:
            kwargs['data'] = self._encode_data(kwargs['data'])
        # Fix: Api subclasses object, so `super(Api, self).request(...)`
        # raised AttributeError; delegate to the underlying session.
        return self.session.request(method, full_url, **kwargs)

    def _encode_data(self, data, **kwargs):
        """Returns data encoded as JSON using a custom encoder."""
        # NOTE(review): `self._json_encoder` is not assigned anywhere in
        # this class — confirm it is provided by a subclass/mixin.
        encoder = JSONEncoder(**kwargs) if kwargs else self._json_encoder
        return encoder.encode(data)

    def test(self):
        """Hit the test function endpoint and return the raw response."""
        url = urljoin(self.base_url, 'functions/test')
        response = self.session.get(url)
        return response

    def login(self):
        """Authenticate the device and switch to session-token auth."""
        url = urljoin(self.base_url, 'login')
        response = self.session.post(url)
        # Fix: `is not 200` compared object identity, not value (worked
        # only via CPython's small-int cache); use != for the comparison.
        if response.status_code != 200:
            print('[error] device is not authorized')
            exit()
        data = response.json()
        self.session.auth = SessionAuth(data['session_token'])
def cmd_search_word(term):
    """Searches word translations at the http://slovari.yandex.ru.

    This command requires `simplejson` module to be installed.
    """
    # NOTE: Python 2 code (integer `/`, dict.iteritems, urllib.urlencode).
    import simplejson
    # Bottle-style template rendering the found variants as an HTML list
    # with optional transcription and a Flash audio player.
    template = """
    <ul>
    %for v in variants:
    <li><a href="/?s=save_word+{{ v['en'].replace(' ', '+') }}%3B+{{ v['ru'].replace(' ', '+').replace(',', '%2C') }}">{{ v['en'] }}</a>
    %if v['transcript']:
    ({{ v['transcript'] }})
    %end
    %if v['has_audio']:
    <object type="application/x-shockwave-flash" data="http://audio.lingvo.yandex.net/swf/lingvo/lingvo-player.swf" width="27" height="27" style="visibility: visible;">
    <param name="allowscriptaccess" value="always">
    <param name="wmode" value="transparent">
    <param name="flashvars" value="color=0xFFFFFF&size=27&counter-path=slovari&count=yes&service-url=http://audio.lingvo.yandex.net&download-url-prefix=sounds&timestamp-url-prefix=timestamp.xml&language=SoundEn&sound-file={{ v['en'] }}.mp3">
    </object>
    %end
    — {{ v['ru'] }}</li>
    %end
    </ul>
    %rebase layout title='Word translation'
    """
    variants = {}
    internet = Session()
    # Query the suggest service with progressively shorter prefixes of
    # `term` (from the full word down to half its length) until more than
    # five variants have been collected.
    for i in reversed(range((len(term) + 1) / 2, len(term) + 1)):
        url = 'http://suggest-slovari.yandex.ru/suggest-lingvo?v=2&lang=en&' + \
            urllib.urlencode(dict(part=term[:i].encode('utf-8')))
        response = internet.get(url)
        data = simplejson.loads(response.content)
        if data[0]:
            # data[1:] holds parallel lists of "en - ru" pairs and links.
            for trans, link in zip(*data[1:]):
                en, ru = trans.split(' - ', 1)
                variants[en] = dict(en=en, ru=ru, link=link)
        if len(variants) > 5:
            break

    def get_spelling(value):
        # Fetch the full dictionary page to extract the transcription and
        # whether an audio pronunciation is available.
        url = 'http://lingvo.yandex.ru/' + force_str(value['en']).replace(' ', '%20') + '/%D1%81%20%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%BE%D0%B3%D0%BE/'
        data = internet.get(url).content
        xml = ET.fromstring(force_str(data))
        transcript = xml.find('*//{x}span[@class="b-translate__tr"]'.format(x=xhtml))
        if transcript is None:
            value['transcript'] = ''
        else:
            value['transcript'] = transcript.text
        has_audio = xml.find('*//{x}h1[@class="b-translate__word"]//{x}span[@class="b-audio g-js"]'.format(x=xhtml))
        value['has_audio'] = has_audio is not None
        return value

    # Enrich every variant with transcription/audio info before rendering.
    variants = dict((key, get_spelling(value))
                    for key, value in variants.iteritems())
    return dict(template=template, variants=sorted(variants.values()))
class LiteServBase(object):
    """Base class that each LiteServ platform need to inherit from.
    Look at LiteServMacOSX.py as an example of a plaform implementation
    of this. This class provides a few common functions as well as
    specifies the API that must be implemented in the subclass."""

    def __init__(self, version_build, host, port, storage_engine):
        self.version_build = version_build
        self.host = host
        self.port = port
        self.storage_engine = storage_engine

        # Used for commandline programs such as net-mono and macosx
        self.process = None

        # For the subclasses, this property may be a file handle or a string
        self.logfile = None

        # Shared HTTP session; all LiteServ APIs speak JSON.
        self.session = Session()
        self.session.headers['Content-Type'] = 'application/json'

    def download(self):
        raise NotImplementedError()

    def install(self):
        raise NotImplementedError()

    def start(self, logfile_name):
        raise NotImplementedError()

    def _verify_not_running(self):
        """
        Verifys that the endpoint does not return a 200 from a running service
        """
        try:
            resp = self.session.get("http://{}:{}/".format(self.host, self.port))
        except ConnectionError:
            # Expecting connection error if LiteServ is not running on the port
            return
        # A successful GET means something is already bound to the port.
        log_r(resp)
        raise LiteServError("There should be no service running on the port")

    def _wait_until_reachable(self):
        # Poll the root endpoint once per second until it answers,
        # giving up after MAX_RETRIES attempts.
        url = "http://{}:{}".format(self.host, self.port)
        count = 0
        while count < MAX_RETRIES:
            try:
                resp = self.session.get(url)
                # If request does not throw, exit retry loop
                break
            except ConnectionError:
                log_info("LiteServ may not be launched (Retrying) ...")
                time.sleep(1)
                count += 1
        if count == MAX_RETRIES:
            raise LiteServError("Could not connect to LiteServ")
        return resp.json()

    def _verify_launched(self):
        raise NotImplementedError()

    def stop(self):
        raise NotImplementedError()

    def remove(self):
        raise NotImplementedError()
def test_session_HTTPS_200_OK_GET(self):
    """A plain GET over HTTPS through a Session must return 200."""
    session = Session()
    response = session.get(httpsbin('/'))
    self.assertEqual(response.status_code, 200)