def signed_call(self, method, path, query_params=None, body_params=None):
    """Perform a signed request against the OVH EU API (v1.0).

    Fetches the current server time from OVH and signs the prepared request
    per OVH's scheme: '$1$' + SHA1 of AS+CK+METHOD+URL+BODY+TIMESTAMP
    joined with '+'.

    Args:
        method: HTTP method, e.g. 'GET' or 'POST'.
        path: API path starting with '/', appended to the 1.0 base URL.
        query_params: optional dict of query-string parameters.
        body_params: optional object serialized as the JSON request body.

    Returns:
        The decoded JSON response body.
    """
    if query_params is None:
        query_params = {}
    credentials = self.get_credentials()
    # Use OVH's own clock for the timestamp so local clock drift cannot
    # invalidate the signature.  (Renamed from `time` to stop shadowing
    # the stdlib `time` module.)
    ovh_time = requests.get('https://eu.api.ovh.com/1.0/auth/time').content.decode('utf8')
    req = requests.Request(
        method,
        'https://eu.api.ovh.com/1.0%s' % path,
        headers={
            'X-Ovh-Application': credentials.get('AK'),
            'X-Ovh-Timestamp': ovh_time,
            'X-Ovh-Consumer': credentials.get('CK')
        },
        params=query_params,
        json=body_params
    )
    prepped = req.prepare()
    # BUG FIX: requests encodes a json= body to bytes, so joining
    # prepped.body with str parts raised TypeError whenever body_params
    # was provided; decode it back to text before signing.
    body = prepped.body or ''
    if isinstance(body, bytes):
        body = body.decode('utf8')
    signature_str = '+'.join([
        credentials.get('AS'),
        credentials.get('CK'),
        prepped.method,
        prepped.url,
        body,
        ovh_time]).encode('utf8')
    prepped.headers['X-Ovh-Signature'] = '$1$' + hashlib.sha1(signature_str).hexdigest()
    # BUG FIX: context-manage the Session so its connection pool is
    # released (the original leaked the Session object).
    with requests.Session() as session:
        res = session.send(prepped)
    return res.json()
def make_session() -> requests.Session:
    """Build a requests Session preloaded with the packaged default headers.

    Reads the ``headers.json`` resource shipped with this package and
    applies its contents to a fresh session.

    Returns:
        A ``requests.Session`` with the bundled headers applied.

    Raises:
        ValueError: if the packaged ``headers.json`` file is absent.
    """
    headers_file = Path(pkg_resources.resource_filename(__name__, 'headers.json'))
    if not headers_file.exists():
        raise ValueError(f'headers file {headers_file} is missing')
    default_headers = json.loads(headers_file.read_text(encoding='utf-8'))
    sess = requests.Session()
    sess.headers.update(default_headers)
    return sess
def get_session():
    """Return a requests Session with in-memory HTTP caching mounted.

    A CacheControl adapter (ETag validation enabled, entries kept in a
    ``DictCache``) is attached for both http and https traffic, and its
    controller is exposed as ``session.cache_controller``.
    """
    sess = requests.Session()
    cache_adapter = CacheControlAdapter(
        DictCache(),
        cache_etags=True,
        serializer=None,
        heuristic=None
    )
    for scheme in ("http://", "https://"):
        sess.mount(scheme, cache_adapter)
    sess.cache_controller = cache_adapter.controller
    return sess
def getConfLocation():
    """Fetch the libsso address and cashmachine port from the config API.

    Queries the internal config service with a proxy-free session and
    extracts ``confvalue`` from the first entry of each response, which is
    parsed with ``ast.literal_eval``.

    Returns:
        Tuple ``(address, portNo)`` — the libsso confvalue as-is and the
        cashmachine confvalue encoded to bytes.
    """
    session = requests.Session()
    session.trust_env = False  # bypass any proxy settings from the environment

    def _first_confvalue(confname):
        # Each endpoint returns a list of records; take the first confvalue.
        reply = session.get(
            'http://172.18.0.4/api/data/config/?format=json&confname=' + confname)
        return ast.literal_eval(reply.text)[0]["confvalue"]

    address = _first_confvalue('libsso')
    portNo = _first_confvalue('cashmachine').encode()
    return address, portNo
def download_html(self, new_url):
    """Download ``new_url`` and return the response body as text.

    Args:
        new_url: absolute URL to fetch.

    Returns:
        The decoded response text.
    """
    headers = {
        # NOTE(review): this User-Agent value looks truncated/malformed
        # (unbalanced parenthesis, missing spaces). Kept byte-identical
        # since servers treat it as an opaque string — confirm intent.
        "User-Agent": "Mozilla/5.0(compatible;MSIE9.0;WindowsNT6.1;Trident/5.0;",
        "Accept": "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,*/*;q=0.8"
    }
    # BUG FIX: context-manage the Session so its connection pool is
    # closed after use (the original never closed it).
    with requests.Session() as session:
        req = session.get(new_url, headers=headers)
        return req.text
def test_request(self):
    """Send QUANT_REQUEST POST requests to the billing agent.

    Records every status code in ``self.list_status_codes``, counts
    fast-failing non-2xx responses in ``self.counter_time_outs``, and
    stamps ``self.time_begin`` / ``self.time_finish`` around the run.
    """
    # Configure the session used for every POST.
    session = requests.Session()
    session.trust_env = False   # ignore proxy environment variables
    session.verify = False      # skip TLS certificate verification
    # NOTE(review): assigning session.timeout has no effect in requests —
    # a timeout must be passed per request (timeout=...). Left in place
    # to avoid changing the request behaviour callers rely on.
    session.timeout = self.MAX_TIME
    # Issue the requests (QUANT_REQUEST times).
    self.time_begin = datetime.datetime.now()
    for _ in range(self.QUANT_REQUEST):
        logging.info('Requesting to -> %s', self.name_b_agent)
        initial_time = datetime.datetime.now()
        # Perform the request.
        response = session.post(self.url_request)
        self.list_status_codes.append(response.status_code)
        if response.status_code < 200 or response.status_code > 202:
            # BUG FIX: the original compared only the .second components
            # (now().second - initial_time.second), which is wrong across
            # minute boundaries and can go negative. Use real elapsed time.
            elapsed = (datetime.datetime.now() - initial_time).total_seconds()
            if elapsed < self.MAX_TIME:
                time.sleep(self.WAIT_TIME)
            self.counter_time_outs += 1
            print('ERROR: ' + str(response.status_code) + ' -> ' + self.name_b_agent)
        else:
            print('SUCCESS: ' + str(response.status_code) + ' -> ' + self.name_b_agent)
            time.sleep(self.WAIT_TIME)
    self.time_finish = datetime.datetime.now()
# import requests import smtplib import pip._vendor.requests as requests import os with open("Currentmarksval.txt", "r") as f: current = int(f.read()) with requests.Session() as c: loginurl = 'https://webkiosk.thapar.edu/CommonFiles/UserAction.jsp' marksurl = "https://webkiosk.thapar.edu/StudentFiles/Exam/StudentEventMarksView.jsp?x=&exam=1920ODDSEM" headers = { "Connection": "keep-alive", "Origin": "https://webkiosk.thapar.edu", "Referer": "https://webkiosk.thapar.edu/", } logindata = { "txtuType": "Member Type", "UserType": "S", "txtCode": "Enrollment No", "MemberCode": os.getenv("RollNumber"), "txtPin": "Password/Pin", "Password": os.getenv("WebkioskPassword"), } c.post(loginurl, logindata, headers) page = c.get(marksurl) tr = int(page.text.count('tr') / 2) print(tr) if tr > current: try: msg = """Subject: New Marks Uploaded """ fromaddr = os.getenv("emailID")
def get_structure_model_twrp(self, device_param):
    """Scrape twrp.me device pages reachable from self.params['device_page']
    and print each device page's whitespace-split text content.

    Args:
        device_param: partial link text used to locate one device link;
            an XPath matching all structurally-identical links is derived
            from it.
    """
    try:
        response = requests.get(self.params['device_page'])
        response.raise_for_status()  # raise on 403, 404, etc.
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        # Start the browser driver.
        driver = webdriver.Chrome()
        # Load the project web page.
        driver.get(self.params['device_page'])
        # Find one element via the device parameter (device_param).
        element = driver.find_element_by_partial_link_text(device_param)
        parents_path = []
        new_path = '..'
        # Walk up one ancestor at a time via XPath...
        parent = element.find_element_by_xpath(new_path)
        # ...collecting tag names until the 'body' tag is reached.
        while (parent.tag_name != 'body'):
            parents_path.append(parent.tag_name)
            new_path += '/..'
            parent = element.find_element_by_xpath(new_path)
        # Rebuild an absolute XPath from body down to the element's tag.
        xpath = '/'
        for e in parents_path[::-1]:
            xpath += "/" + e
        xpath += "/" + element.tag_name
        print('FIND PATH', xpath)
        # Find all other devices matching the same XPath.
        result = driver.find_elements(By.XPATH, xpath)
        #print(driver.find_elements(By.XPATH, xpath))
        # All links to device-manufacturer pages.
        links = []
        for i in result:
            links.append(i.get_attribute("href"))
        # print(links)
        # Fetch information about every device:
        # read each manufacturer page...
        for j in links:
            session = requests.Session()
            request = session.get(j)
            soup = BeautifulSoup(request.text, 'html.parser')
            links_of_devices = []
            # ...collect the per-device links it contains...
            links2 = []
            links_of_devices = soup.find_all("strong")
            for link in links_of_devices:
                links2.append(link.find("a").get("href"))
            # ...then read the information for each device.
            for i in links2:
                URL = "https://twrp.me" + i
                session = requests.Session()
                request = session.get(URL)
                soup = BeautifulSoup(request.text, 'html.parser')
                c = soup.text
                # Flatten all device-page text into a list of words.
                content = c.split()
                print(content)
def get_structure_parametres(self, device_param):
    """Scrape device pages linked from self.params['device_page'], pull a
    release-year-looking token from each, and persist the years into a
    ZODB FileStorage ('parametres_of_device.fs').

    Args:
        device_param: partial link text used to locate one device link;
            an XPath matching all structurally-identical links is derived
            from it.
    """
    try:
        response = requests.get(self.params['device_page'])
        response.raise_for_status()  # raise on 403, 404, etc.
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        # Start the browser driver.
        driver = webdriver.Chrome()
        # Load the project web page.
        driver.get(self.params['device_page'])
        # Find one element via the device parameter (device_param).
        element = driver.find_element_by_partial_link_text(device_param)
        parents_path = []
        new_path = '..'
        # Walk up one ancestor at a time via XPath...
        parent = element.find_element_by_xpath(new_path)
        # ...collecting tag names until the 'body' tag is reached.
        while (parent.tag_name != 'body'):
            parents_path.append(parent.tag_name)
            new_path += '/..'
            parent = element.find_element_by_xpath(new_path)
        # Rebuild an absolute XPath from body down to the element's tag.
        xpath = '/'
        for e in parents_path[::-1]:
            xpath += "/" + e
        xpath += "/" + element.tag_name
        # print('FIND PATH', xpath)
        # Find all other devices matching the same XPath.
        result = driver.find_elements(By.XPATH, xpath)
        # print(driver.find_elements(By.XPATH, xpath))
        # Text content of every matched device element.
        content_of_pages = []
        for i in result:
            content_of_pages.append(i.text)
        # print(content_of_pages)
        # All device links, and the years collected from them.
        links = []
        years = []
        for i in result:
            links.append(i.get_attribute("href"))
        # print(links)
        # Fetch information about each device.
        for j in links:
            URL = j
            session = requests.Session()
            try:
                request = session.get(URL)
            except pip._vendor.requests.exceptions.InvalidSchema:
                print('No connection adapters')
            else:
                soup = BeautifulSoup(request.text, 'html.parser')
                c = soup.text
                # Collapse all page text onto a single line.
                content = c.split()
                content1 = " ".join(content)
                # print(content1)
                # NOTE(review): '[20]\d{3}' matches '2' OR '0' followed by
                # three digits (e.g. '0123'), not only 20xx years —
                # r'20\d{2}' was probably intended. Also `if result:` is
                # always truthy at this point; `if year_of_release:` was
                # likely meant, since .group(0) raises AttributeError when
                # the search found nothing. Confirm before fixing.
                year_of_release = re.search('[20]\d{3}', content1)
                if result:
                    years.append(year_of_release.group(0))
        # print(years)
        # Persist the collected years into a ZODB file store.
        storage = FileStorage.FileStorage(
            'parametres_of_device.fs')  # creates/opens parametres_of_device.fs
        db = DB(storage)
        connection = db.open()  # open the DB
        # The connection object exposes the root container via root().
        root = connection.root()
        root['year'] = years
        transaction.commit()  # save the changes to the DB
        connection.close()  # close the connection
        print(root.items())  # check what was stored in the DB
# -*- coding=utf-8 -*- import csv # 加载csv包便于读取csv文件 import json from time import sleep from pip._vendor import requests from pip._vendor.requests.adapters import HTTPAdapter from pip._vendor.requests.packages.urllib3 import Retry session = requests.Session() retry = Retry(connect=10000, backoff_factor=0.5) adapter = HTTPAdapter(max_retries=retry) session.mount('http://', adapter) session.mount('https://', adapter) import pymysql conn = pymysql.connect(host='127.0.0.1', port=3306, user='******', passwd='haha', db='ireading') cur = conn.cursor() cur.execute("select Max(id) from word_slim e where e.word REGEXP '^[A-Za-z]+$' and e.word not in (select m.word from word_mini m) and uk_pron is not null and us_pron is not null") for item in cur: index = item[0] if item[0] else 0 cur.execute("select word from word_slim e where e.word REGEXP '^[A-Za-z]+$' and e.word not in (select m.word from word_mini m) and id>" + str(index)) l = [] c=0 count = index for item in cur: l.append(item[0]) print('finish') for word in l:
def main():
    """Log in to WeChat web, add contacts to a temporary chat room in
    batches, and report which contacts have deleted you.

    Relies on module-level helpers (getUUID, showQRImage, waitForLogin,
    login, webwxinit, webwxgetcontact, createChatroom, addMember,
    deleteMember, heartBeatLoop) and globals (QRImagePath, MAX_GROUP_NUM,
    MAX_PROGRESS_LEN, INTERFACE_CALLING_INTERVAL).
    """
    global myRequests
    # Disable HTTPS certificate verification globally (legacy workaround).
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context
    headers = {
        'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36'
    }
    myRequests = requests.Session()
    myRequests.headers.update(headers)
    if not getUUID():
        print('获取uuid失败')
        return
    print('正在获取二维码图片...')
    showQRImage()
    while waitForLogin() != '200':
        pass
    os.remove(QRImagePath)
    if not login():
        print('登录失败')
        return
    if not webwxinit():
        print('初始化失败')
        return
    MemberList = webwxgetcontact()
    print('开启心跳线程')
    # BUG FIX: the Thread was created but never started. Run it as a
    # daemon so the heartbeat loop cannot keep the interpreter alive.
    heartbeat = threading.Thread(target=heartBeatLoop, daemon=True)
    heartbeat.start()
    MemberCount = len(MemberList)
    print('通讯录共%s位好友' % MemberCount)
    ChatRoomName = ''
    result = []
    # Map UserName -> (NickName, RemarkName) for display later.
    d = {}
    for Member in MemberList:
        d[Member['UserName']] = (Member['NickName'], Member['RemarkName'])
    print('开始查找...')
    group_num = int(math.ceil(MemberCount / float(MAX_GROUP_NUM)))
    for i in range(0, group_num):
        # Collect up to MAX_GROUP_NUM member names for this batch.
        UserNames = []
        for j in range(0, MAX_GROUP_NUM):
            if i * MAX_GROUP_NUM + j >= MemberCount:
                break
            Member = MemberList[i * MAX_GROUP_NUM + j]
            UserNames.append(Member['UserName'])
        # Create the chat room on the first batch / add members afterwards.
        if ChatRoomName == '':
            (ChatRoomName, DeletedList, BlockedList) = createChatroom(UserNames)
        else:
            (DeletedList, BlockedList) = addMember(ChatRoomName, UserNames)
        # TODO: BlockedList — contacts that have blocked you.
        DeletedCount = len(DeletedList)
        if DeletedCount > 0:
            result += DeletedList
        # Remove this batch from the room again.
        deleteMember(ChatRoomName, UserNames)
        # Progress bar.
        progress = MAX_PROGRESS_LEN * (i + 1) / group_num
        print('[', '#' * int(progress), '-' * int(MAX_PROGRESS_LEN - progress), ']', end=' ')
        print('新发现你被%d人删除' % DeletedCount)
        # BUG FIX: the original reused `i` as this inner loop variable,
        # clobbering the batch index consumed by `if i != group_num - 1:`
        # below; use a distinct name.
        for k in range(DeletedCount):
            if d[DeletedList[k]][1] != '':
                print('%s(%s)' % (d[DeletedList[k]][0], d[DeletedList[k]][1]))
            else:
                print(d[DeletedList[k]][0])
        if i != group_num - 1:
            print('正在继续查找,请耐心等待...')
            # Wait before the next batch of API calls.
            time.sleep(INTERFACE_CALLING_INTERVAL)
    # TODO: delete the temporary chat room.
    print('\n结果汇总完毕,20s后可重试...')
    resultNames = []
    for r in result:
        if d[r][1] != '':
            resultNames.append('%s(%s)' % (d[r][0], d[r][1]))
        else:
            resultNames.append(d[r][0])
    print('---------- 被删除的好友列表(共%d人) ----------' % len(result))
    # Strip emoji <span> markup out of the display names.
    resultNames = list(
        map(lambda x: re.sub(r'<span.+/span>', '', x), resultNames))
    if len(resultNames):
        print('\n'.join(resultNames))
    else:
        print("无")
    print('---------------------------------------------')