def __download(self, q, url):
    request = requests.session()
    ID = self.__getTrakeId(url)
    fullurl = "http://api.soundcloud.com/i1/tracks/{0}/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28&app_version=8bae64e".format(ID)
    response = request.get(fullurl).text
    j = json.loads(response)
    link = j["http_mp3_128_url"]
    if link is not None:
        url = link
    else:
        raise Exception("Failed to get download link")
    request = requests.session()
    response = request.get(url, stream=True)
    a, c = self.__getTrackInfo(ID)
    filename = c + ".mp3"
    with open(filename, 'wb') as fd:
        total_length = int(response.headers.get('content-length'))
        # taken from http://stackoverflow.com/a/20943461
        for chunk in progress.bar(response.iter_content(chunk_size=1024),
                                  expected_size=(total_length / 1024)):
            if chunk:
                fd.write(chunk)
                fd.flush()
    filename = [filename, a, c]
    self.__addtags(filename)
    q.task_done()
def test_three_legged_auth(self):
    yes_or_no = raw_input("Do you want to skip Imgur three legged auth test? (y/n):")
    if yes_or_no.lower() in ['y', 'yes']:
        return
    for header_auth in (True, False):
        # Step 1: Obtaining a request token
        imgur_oauth_hook = OAuthHook(consumer_key=IMGUR_CONSUMER_KEY,
                                     consumer_secret=IMGUR_CONSUMER_SECRET,
                                     header_auth=header_auth)
        client = requests.session(hooks={'pre_request': imgur_oauth_hook})
        response = client.post('http://api.imgur.com/oauth/request_token')
        qs = parse_qs(response.text)
        oauth_token = qs['oauth_token'][0]
        oauth_secret = qs['oauth_token_secret'][0]

        # Step 2: Redirecting the user
        print "Go to http://api.imgur.com/oauth/authorize?oauth_token=%s and sign in into the application, then enter your PIN" % oauth_token
        oauth_verifier = raw_input('Please enter your PIN:')

        # Step 3: Authenticate
        new_imgur_oauth_hook = OAuthHook(oauth_token, oauth_secret, IMGUR_CONSUMER_KEY,
                                         IMGUR_CONSUMER_SECRET, header_auth)
        new_client = requests.session(hooks={'pre_request': new_imgur_oauth_hook})
        response = new_client.post('http://api.imgur.com/oauth/access_token',
                                   {'oauth_verifier': oauth_verifier})
        response = parse_qs(response.content)
        token = response['oauth_token'][0]
        token_secret = response['oauth_token_secret'][0]
        self.assertTrue(token)
        self.assertTrue(token_secret)
def __init__(self, url, username=None, password=None, params=None, headers=None,
             cookies=None, timeout=None, session=None):
    self._url = url
    self._auth = ()
    if username is not None:
        self._auth = SimpleAuth(username=username)
        if password is not None:
            self._auth = SimpleAuth(username=username, password=password)
    self._timeout = timeout
    self._params = default(params, {})
    self._headers = default(headers, {})
    self._cookies = default(cookies, {})
    if session is None:
        # create request session object and try to auto detect proper ssl version
        self._session = requests.session()
        if self._url.startswith('https://'):
            SSLVer = None
            for SSLVer in SSLVers:
                try:
                    self._session = requests.session()
                    self._session.mount('https://', SSLAdapter(SSLVer))
                    self._session.get(self._url)
                    break
                except requests.exceptions.SSLError:
                    continue
            logger.debug('Detected SSL Version: %s' % SSLVersStr[SSLVer])
    else:
        self._session = session
def getuserstatus(session=''):
    status = 'Guest'
    user1 = 'Guest'
    if session == '':
        with open('cookies') as f:
            cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
        session = requests.session()
        session.cookies = cookies
        del session.cookies['c_visitor']
        #print session.cookies
        #session = requests.session()
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
               'Connection': 'keep-alive'}
    site = session.get('https://www.crunchyroll.com/acct/membership/', headers=headers, verify=True).text
    #open('tempfile','w').write(site).encoding('UTF-8')
    #print site.encode('utf-8')
    if re.search(re.escape(' ga(\'set\', \'dimension5\', \'registered\');'), site):
        status = 'Free Member'
    elif re.search(re.escape(' ga(\'set\', \'dimension5\', \'premium\');'), site):
        if re.search(re.escape(' ga(\'set\', \'dimension6\', \'premiumplus\');'), site):
            status = 'Premium+ Member'
        else:
            status = 'Premium Member'
    if status != 'Guest':
        user1 = re.findall('<a href=\"/user/(.+)\" ', site).pop()
    return [status, user1]
def test_login_api():
    # create account with fb
    with session() as c:
        d = {"id": '123456', "first_name": 'Frëd', "last_name": 'Low',
             "email": '*****@*****.**', "location": 'Mountain View, CA'}
        headers = {'content-type': 'application/json; charset=utf-8json'}
        request = c.post('%s/signup/fb' % HOST, data=json.dumps(d), headers=headers)
        assert request.content == '{"status": "new"}'

    with session() as c:
        d = {"id": "123456", "first_name": "Frëd", "last_name": "Low",
             "email": "*****@*****.**", "location": "Mountain View, CA"}
        headers = {'content-type': '; charset=utf-8'}
        request = c.post('%s/signup/fb' % HOST, data=json.dumps(d), headers=headers)
        assert request.content == '{"status": "new"}'

    with session() as c:
        d = {"id": "123", "first_name": "Frëd", "last_name": "Low",
             "email": "*****@*****.**", "location": "Mountain View, CA"}
        headers = {'content-type': '; charset=utf-8json'}
        request = c.post('%s/signup/fb' % HOST, data=json.dumps(d), headers=headers)
        assert request.content == '{"status": "new"}'
def fetch_production(country_code='BO', session=None):
    # Define actual and last day (for midnight data)
    now = arrow.now(tz=tz_bo)
    formatted_date = now.format('YYYY-MM-DD')
    past_formatted_date = arrow.get(formatted_date, 'YYYY-MM-DD').shift(days=-1).format('YYYY-MM-DD')

    # Define output frame
    actual_hour = now.hour
    data = [dict() for h in range(actual_hour + 1)]

    # initial path for url to request
    url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='

    # Start with data for midnight
    url = url_init + past_formatted_date

    # Request and rearrange in DF
    r = session or requests.session()
    response = r.get(url)
    obj = webparser(response)
    data_temp = fetch_hourly_production(country_code, obj, 0, formatted_date)
    data[0] = data_temp

    # Fill data for the other hours until actual hour
    if actual_hour > 1:
        url = url_init + formatted_date
        # Request and rearrange in DF
        r = session or requests.session()
        response = r.get(url)
        obj = webparser(response)
        for h in range(1, actual_hour + 1):
            data_temp = fetch_hourly_production(country_code, obj, h, formatted_date)
            data[h] = data_temp
    return data
def __init__(self, api_key=None, api_secret=None, oauth_token=None,
             oauth_token_secret=None, callback_url='', headers=None):
    self.api_key = api_key and u'%s' % api_key
    self.api_secret = api_secret and u'%s' % api_secret
    self.oauth_token = oauth_token and u'%s' % oauth_token
    self.oauth_token_secret = oauth_token_secret and u'%s' % oauth_token_secret
    self.callback_url = callback_url

    self.request_token_url = 'http://api.netflix.com/oauth/request_token'
    self.access_token_url = 'http://api.netflix.com/oauth/access_token'
    self.authorize_url = 'https://api-user.netflix.com/oauth/login'
    self.old_api_base = 'http://api.netflix.com/'
    self.api_base = 'http://api-public.netflix.com/'

    default_headers = {'User-agent': 'Python-Netflix v%s' % __version__}
    # dict.update() returns None, so assign the mutated dict, not the return value.
    default_headers.update(headers or {})
    self.headers = default_headers

    self.client = requests.session(headers=self.headers)

    self.auth = None
    if self.api_key is not None and self.api_secret is not None:
        self.auth = OAuth1(self.api_key, self.api_secret, signature_type='auth_header')
    if self.oauth_token is not None and self.oauth_token_secret is not None:
        self.auth = OAuth1(self.api_key, self.api_secret, self.oauth_token,
                           self.oauth_token_secret, signature_type='auth_header')
    if self.auth is not None:
        self.client = requests.session(headers=self.headers, auth=self.auth)
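# A note on the fix above: dict.update() mutates in place and returns None, so
# chaining it into an assignment silently stores None. A minimal standalone sketch:
defaults = {'User-agent': 'Python-Netflix v1.0'}
broken = defaults.update({'Accept': 'application/json'})
assert broken is None  # update() has no useful return value
defaults.update({'Accept': 'application/json'})
headers = defaults     # correct: keep using the mutated dict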
def interfaceTest(num, api_purpose, api_host, request_url, request_method,
                  request_data_type, request_data, check_point, s=None):
    headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
               'X-Requested-With': 'XMLHttpRequest',
               'Connection': 'keep-alive',
               'Referer': 'http://' + api_host,
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36'}
    if s is None:
        s = requests.session()
    if request_method == 'POST':
        if request_url != '/login':
            # The data is not encrypted here, so the JSON string must first be decoded into a Python object.
            r = s.post(url='http://' + api_host + request_url,
                       data=json.loads(request_data), headers=headers)
        elif request_url == '/login':
            # The login password must not travel in plain text, so it is MD5-hashed; json.loads()
            # was already applied in earlier code, so no decoding is needed here.
            s = requests.session()
            r = s.post(url='http://' + api_host + request_url,
                       data=request_data, headers=headers)
    else:
        logging.error(num + ' ' + api_purpose + ' wrong HTTP request method; please check the [Request Method] field!')
        return 400, None, None
    status = r.status_code
    resp = r.text
    print resp
    if status == 200:
        if re.search(check_point, str(r.text)):
            logging.info(num + ' ' + api_purpose + ' passed, ' + str(status) + ', ' + str(r.text))
            return status, resp, s
        else:
            logging.error(num + ' ' + api_purpose + ' failed! [' + str(status) + '], ' + str(r.text))
            return 200, resp, None
    else:
        logging.error(num + ' ' + api_purpose + ' failed! [' + str(status) + '], ' + str(r.text))
        return status, resp.decode('utf-8'), None
def handle_auth(self):
    """ Handles test authentication. """
    login = self.login_options
    if self.login_type == models_base.LoginType.NONE:
        session = requests.session()
    elif self.login_type == models_base.LoginType.BASIC:
        session = requests.session(auth=(login['user'], login['password']))
    elif self.login_type == models_base.LoginType.COOKIE:
        session = requests.session()
        params = {login['login_field']: login['login'],
                  login['password_field']: login['password']}
        session.post(self.login_info['url'], params=params)
    elif self.login_type == models_base.LoginType.OAUTH:
        params = dict(access_token=login['access_token'],
                      access_token_secret=login['access_token_secret'],
                      consumer_key=login['consumer_key'],
                      consumer_secret=login['consumer_secret'],
                      header_auth=login.get('header_auth', True))
        oauth_hook = OAuthHook(**params)
        session = requests.session(hooks={'pre_request': oauth_hook})
        logger.info('Attaching OAuth info.')
    else:
        raise NotImplementedError('%s login type not implemented' % self.login_type)
    return session
def __init__(self, *args, **kwargs):
    super(Carrier, self).__init__(*args, **kwargs)

    # Get configuration
    self.config = Config(os.getcwd())
    self.config.from_object(defaults)
    if "CARRIER_CONF" in os.environ:
        self.config.from_envvar("CARRIER_CONF")

    # Initialize app
    logging.config.dictConfig(self.config["LOGGING"])

    store = redis.StrictRedis(**dict([(k.lower(), v) for k, v in self.config["REDIS"].items()]))

    wsession = requests.session(
        auth=(
            self.config["WAREHOUSE_AUTH"]["USERNAME"],
            self.config["WAREHOUSE_AUTH"]["PASSWORD"],
        ),
        headers={"User-Agent": user_agent()},
    )
    warehouse = forklift.Forklift(session=wsession)
    warehouse.url = self.config["WAREHOUSE_URI"]

    psession = requests.session(verify=self.config["PYPI_SSL_VERIFY"],
                                headers={"User-Agent": user_agent()})
    ptransports = [xmlrpc2.client.HTTPTransport(session=psession),
                   xmlrpc2.client.HTTPSTransport(session=psession)]
    pypi = xmlrpc2.client.Client(self.config["PYPI_URI"], transports=ptransports)

    self.processor = Processor(warehouse, pypi, store)
def _request(self, url, method=u"get", data=None, headers=None, **kwargs):
    """
    does the request via requests - oauth not implemented yet - use basic auth please
    """
    # if self.access_token:
    #     auth_header = {
    #         u"Authorization": "Bearer %s" % (self.access_token)
    #     }
    #     headers.update(auth_header)

    # basic auth
    msg = "method: %s url:%s\nheaders:%s\ndata:%s" % (method, url, headers, data)
    #print msg
    if not self.use_oauth:
        auth = (self.sk_user, self.sk_pw)
        if not self.client:
            self.client = requests.session()
        r = self.client.request(method, url, headers=headers, data=data, auth=auth, **kwargs)
    else:
        if not self.client:
            self.client = requests.session(hooks={'pre_request': oauth_hook})
        r = self.client.request(method, url, headers=headers, data=data, **kwargs)
    return r
def interfaceTest(num, api_purpose, api_host, request_url, request_method,
                  request_data_type, request_data, check_point, s=None):
    headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
               'X-Requested-With': 'XMLHttpRequest',
               'Connection': 'keep-alive',
               'Referer': 'http://' + api_host,
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36'}
    if s is None:
        s = requests.session()
    if request_method == 'POST':
        if request_url != '/login':
            # The data is not encrypted, so the JSON string is decoded into a Python object first.
            r = s.post(url='http://' + api_host + request_url,
                       data=json.loads(request_data), headers=headers)
        elif request_url == '/login':
            # The password is MD5-hashed; json.loads() was already applied in earlier code,
            # so no decoding is needed here.
            s = requests.session()
            r = s.post(url='http://' + api_host + request_url,
                       data=request_data, headers=headers)
    else:
        logging.error(num + ' ' + api_purpose + ' wrong HTTP request method; please check the [Request Method] field!')
        return 400, None, None
    status = r.status_code
    resp = r.text
    print resp
    if status == 200:
        if re.search(check_point, str(r.text)):
            logging.info(num + ' ' + api_purpose + ' passed, ' + str(status) + ', ' + str(r.text))
            return status, resp, s
        else:
            logging.error(num + ' ' + api_purpose + ' failed! [' + str(status) + '], ' + str(r.text))
            return 200, resp, None
    else:
        logging.error(num + ' ' + api_purpose + ' failed! [' + str(status) + '], ' + str(r.text))
        return status, resp.decode('utf-8'), None
def dump_data_from_api(start_id, finish_id):
    req = "http://api.wotblitz.ru/wotb/account/info/?application_id={}&fields=nickname&account_id={}"
    filename = "nicknames_dump_" + str(start_id) + "_" + str(finish_id)
    f = open(filename, 'a')
    S = requests.session()
    for i in range((finish_id - start_id) // 100):
        if i % 10 == 0:
            logging.critical("current start_id: {}".format(str(start_id + i * 100)))
        account_ids_list = []
        for account_id in range(start_id + i * 100, start_id + (i + 1) * 100):
            account_ids_list.append(str(account_id))
        full_req = req.format(config.wargaming_id, ",".join(account_ids_list))
        # with eventlet.Timeout(30):
        response = S.get(full_req, timeout=30).json()
        try:
            nicknames = extract_nickname_from_response(response)
        except SourceNotAvailableException:
            logging.error("Caught SOURCE_NOT_AVAILABLE, start_id + i*100 = " + str(start_id + i * 100))
            S.close()
            time.sleep(1)
            S = requests.session()
            response = S.get(full_req, timeout=30).json()
            nicknames = extract_nickname_from_response(response)
        # use a distinct loop variable so the outer counter `i` is not shadowed
        for nickname in nicknames:
            f.write(nickname + "\n")
    f.close()
def login():
    status_code = 0
    username = raw_input('Enter username:')
    password = raw_input('Enter password:')
    client = requests.session()
    client.get('http://' + base + '/file_demo/login/')
    csrf = client.cookies['csrftoken']
    credentials = {'username': username, 'password': password}
    header = {'X-CSRFToken': csrf}
    web = client.post('http://' + base + '/file_demo/login/', data=credentials, headers=header)
    status_code = web.status_code
    secure_cookie = web.cookies
    while status_code != 200:
        print "incorrect combo!"
        username = raw_input('Enter username:')
        password = raw_input('Enter password:')
        client = requests.session()
        client.get('http://' + base + '/file_demo/login/')
        csrf = client.cookies['csrftoken']
        credentials = {'username': username, 'password': password}
        header = {'X-CSRFToken': csrf}
        web = client.post('http://' + base + '/file_demo/login/', data=credentials, headers=header)
        status_code = web.status_code
        secure_cookie = web.cookies
    return secure_cookie
def loadCitiesFromOsm():
    """
    Load all cities from OpenStreetMap, use bounding box for position
    :return: rendered template
    """
    global URL
    map_details = Map.getMaps()[0].getMapBox(tilepath=current_app.config.get('PATH_TILES'))
    # search all cities in bounding box
    SEARCHSTRING = 'rel[boundary=administrative](%s,%s,%s,%s);out;' % (map_details['min_latdeg'],
                                                                       map_details['min_lngdeg'],
                                                                       map_details['max_latdeg'],
                                                                       map_details['max_lngdeg'])
    # use the session for the request instead of discarding it
    session = requests.session()
    r = session.post(URL, data={'data': SEARCHSTRING})
    xmldoc = minidom.parseString(r.content)
    relations = xmldoc.getElementsByTagName('relation')
    osmids = [c.osmid for c in City.getCities()]
    cities = []
    for relation in relations:
        for tag in relation.childNodes:
            if tag.nodeName == "tag" and tag.attributes['k'].value == 'name':
                cities.append([relation.attributes['id'].value,
                               tag.attributes['v'].value,
                               int(relation.attributes['id'].value) in osmids])
    cities.sort(lambda x, y: cmp(x[1], y[1]))
    return render_template('admin.streets.city_osm.html', cities=cities)
def newSession():
    """ Returns a new Requests session with pre-loaded default HTTP Headers

    Generates a new Requests session and consults with the Configuration class
    to determine if a Configuration exists and attempts to use the configured
    HTTP Request headers first. If this fails, it attempts to create a new
    default configuration and use those values. Finally, if a configuration
    cannot be initialized it uses the hard-coded Mozilla headers.

    Returns
       request-client - The configured Requests session

    Raises
       HTTPException
    """
    from neolib.config.Configuration import Configuration
    s = requests.session()

    if not Configuration.loaded():
        if not Configuration.initialize():
            s.headers.update(Page._defaultVars)
        else:
            s.headers.update(Configuration.getConfig().core.HTTPHeaders.toDict())
    else:
        s.headers.update(Configuration.getConfig().core.HTTPHeaders.toDict())

    # Return the session that was just configured, not a fresh unconfigured one.
    return s
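# A hedged usage sketch of newSession(); the URL is illustrative only. The
# returned session sends the configured headers on every request it makes.
s = newSession()
r = s.get('http://www.example.com/')
print(r.request.headers)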
def interfaceTest(num, api_purpose, api_host, request_url, request_method,
                  request_data_type, request_data, check_point, s=None):
    headers = {'content-type': 'application/json',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
               'Accept': 'application/x-ms-application, image/jpeg, application/xaml+xml, image/gif, image/pjpeg, application/x-ms-xbap, */*',
               'Accept-Language': 'zh-CN'}
    if s is None:
        s = requests.session()
    if request_method == 'POST':
        if request_url != '/login':
            # The data is not encrypted, so the JSON string is decoded into a Python object first.
            r = s.post(url='http://' + api_host + request_url,
                       data=json.loads(request_data), headers=headers)
        elif request_url == '/login':
            # The password is MD5-hashed; json.loads() was already applied in earlier code,
            # so no decoding is needed here.
            s = requests.session()
            r = s.post(url='http://' + api_host + request_url,
                       data=request_data, headers=headers)
    else:
        logging.error(num + ' ' + api_purpose + ' wrong HTTP request method; please check the [Request Method] field!')
        return 400, None, None
    status = r.status_code
    resp = r.text
    print resp
    if status == 200:
        if re.search(check_point, str(r.text)):
            logging.info(num + ' ' + api_purpose + ' passed, ' + str(status) + ', ' + str(r.text))
            return status, resp, s
        else:
            logging.error(num + ' ' + api_purpose + ' failed! [' + str(status) + '], ' + str(r.text))
            return 200, resp, None
    else:
        logging.error(num + ' ' + api_purpose + ' failed! [' + str(status) + '], ' + str(r.text))
        return status, resp.decode('utf-8'), None
def __init__(self, app_key=None, app_secret=None, oauth_token=None, oauth_token_secret=None,
             headers=None, callback_url=None, twitter_token=None, twitter_secret=None,
             proxies=None, version='1.1'):
    """Instantiates an instance of Twython. Takes optional parameters for authentication and such (see below).

    :param app_key: (optional) Your applications key
    :param app_secret: (optional) Your applications secret key
    :param oauth_token: (optional) Used with oauth_token_secret to make authenticated calls
    :param oauth_token_secret: (optional) Used with oauth_token to make authenticated calls
    :param headers: (optional) Custom headers to send along with the request
    :param callback_url: (optional) If set, will overwrite the callback url set in your application
    :param proxies: (optional) A dictionary of proxies, for example {"http":"proxy.example.org:8080", "https":"proxy.example.org:8081"}.
    """

    # Needed for hitting that there API.
    self.api_version = version
    self.api_url = 'https://api.twitter.com/%s'
    self.request_token_url = self.api_url % 'oauth/request_token'
    self.access_token_url = self.api_url % 'oauth/access_token'
    self.authorize_url = self.api_url % 'oauth/authorize'
    self.authenticate_url = self.api_url % 'oauth/authenticate'

    # Enforce unicode on keys and secrets
    self.app_key = app_key and unicode(app_key) or twitter_token and unicode(twitter_token)
    self.app_secret = app_key and unicode(app_secret) or twitter_secret and unicode(twitter_secret)
    self.oauth_token = oauth_token and u'%s' % oauth_token
    self.oauth_token_secret = oauth_token_secret and u'%s' % oauth_token_secret

    self.callback_url = callback_url

    # If there's headers, set them, otherwise be an embarrassing parent for their own good.
    self.headers = headers or {'User-Agent': 'Twython v' + __version__}

    # Allow for unauthenticated requests
    self.client = requests.session(proxies=proxies)
    self.auth = None

    if self.app_key is not None and self.app_secret is not None and \
            self.oauth_token is None and self.oauth_token_secret is None:
        self.auth = OAuth1(self.app_key, self.app_secret, signature_type='auth_header')

    if self.app_key is not None and self.app_secret is not None and \
            self.oauth_token is not None and self.oauth_token_secret is not None:
        self.auth = OAuth1(self.app_key, self.app_secret, self.oauth_token,
                           self.oauth_token_secret, signature_type='auth_header')

    if self.auth is not None:
        self.client = requests.session(headers=self.headers, auth=self.auth, proxies=proxies)

    # register available funcs to allow listing name when debugging.
    def setFunc(key):
        return lambda **kwargs: self._constructFunc(key, **kwargs)
    for key in api_table.keys():
        self.__dict__[key] = setFunc(key)

    # create stash for last call intel
    self._last_call = None
def session(headers=None):
    ''' Session wrapper for convenience '''
    if headers:
        return requests.session(auth=AUTH, headers=headers)
    else:
        return requests.session(auth=AUTH)
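# A brief usage sketch of the wrapper above (assuming AUTH is a (user, password)
# tuple defined at module level); extra headers ride along on the session.
s = session(headers={'Accept': 'application/json'})
r = s.get('https://api.example.com/items')  # hypothetical endpoint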
def __init__(self, twitter_token=None, twitter_secret=None, oauth_token=None,
             oauth_token_secret=None, headers=None, callback_url=None):
    """setup(self, oauth_token = None, headers = None)

    Instantiates an instance of Twython. Takes optional parameters for authentication and such (see below).

    Parameters:
        twitter_token - Given to you when you register your application with Twitter.
        twitter_secret - Given to you when you register your application with Twitter.
        oauth_token - If you've gone through the authentication process and have a token for this user,
                      pass it in and it'll be used for all requests going forward.
        oauth_token_secret - see oauth_token; it's the other half.
        headers - User agent header, dictionary style ala {'User-Agent': 'Bert'}
        client_args - additional arguments for HTTP client (see httplib2.Http.__init__), e.g. {'timeout': 10.0}

        ** Note: versioning is not currently used by search.twitter functions; when Twitter moves their junk, it'll be supported.
    """
    OAuthHook.consumer_key = twitter_token
    OAuthHook.consumer_secret = twitter_secret

    # Needed for hitting that there API.
    self.request_token_url = 'http://twitter.com/oauth/request_token'
    self.access_token_url = 'http://twitter.com/oauth/access_token'
    self.authorize_url = 'http://twitter.com/oauth/authorize'
    self.authenticate_url = 'http://twitter.com/oauth/authenticate'
    self.twitter_token = twitter_token
    self.twitter_secret = twitter_secret
    self.oauth_token = oauth_token
    self.oauth_secret = oauth_token_secret
    self.callback_url = callback_url

    # If there's headers, set them, otherwise be an embarrassing parent for their own good.
    self.headers = headers
    if self.headers is None:
        self.headers = {'User-agent': 'Twython Python Twitter Library v1.4.6'}

    self.client = None
    if self.twitter_token is not None and self.twitter_secret is not None:
        self.client = requests.session(hooks={'pre_request': OAuthHook()})

    if self.oauth_token is not None and self.oauth_secret is not None:
        self.oauth_hook = OAuthHook(self.oauth_token, self.oauth_secret)
        self.client = requests.session(hooks={'pre_request': self.oauth_hook})

    # Filter down through the possibilities here - if they have a token, if they're first stage, etc.
    if self.client is None:
        # If they don't do authentication, but still want to request unprotected resources, we need an opener.
        self.client = requests.session()

    # register available funcs to allow listing name when debugging.
    def setFunc(key):
        return lambda **kwargs: self._constructFunc(key, **kwargs)
    for key in api_table.keys():
        self.__dict__[key] = setFunc(key)
def get_cache(directory, max_bytes, logger=warnings):
    forever = True if directory else False
    if forever:
        fc = LimitedFileCache(directory, forever=forever, max_bytes=max_bytes, logger=logger)
        return CacheControl(requests.session(), cache=fc)
    else:
        # not forever so just cache within this run
        return CacheControl(requests.session())
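# A hedged usage sketch of get_cache(); the directory and size are placeholders.
# CacheControl wraps the session so responses are cached per standard HTTP caching rules.
sess = get_cache('/tmp/http-cache', max_bytes=50 * 1024 * 1024)
r1 = sess.get('https://example.com/')  # hits the network
r2 = sess.get('https://example.com/')  # may be served from the file cache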
def __init__(self, app_key=None, app_secret=None, oauth_token=None,
             oauth_token_secret=None, headers=None, callback_url=None,
             pool_maxsize=None):
    # Define some API URLs real quick
    self.base_api_url = "http://api.tumblr.com"
    self.api_version = "v2"
    self.api_url = "%s/%s/" % (self.base_api_url, self.api_version)

    # Authentication URLs
    self.request_token_url = "http://www.tumblr.com/oauth/request_token"
    self.access_token_url = "https://www.tumblr.com/oauth/access_token"
    self.authorize_url = "https://www.tumblr.com/oauth/authorize"
    self.authenticate_url = "https://www.tumblr.com/oauth/authorize"

    self.callback_url = callback_url
    self.default_params = {"api_key": app_key}

    # If there's headers, set them, otherwise be an embarrassing parent
    self.headers = headers or {"User-Agent": "Tumblpy v" + __version__}

    if pool_maxsize:
        requests_config = {"pool_maxsize": pool_maxsize}
    else:
        requests_config = {}

    # Allow for unauthenticated requests
    self.client = requests.session(config=requests_config)
    self.auth = None

    if app_key and app_secret:
        self.app_key = unicode(app_key) or app_key
        self.app_secret = unicode(app_secret) or app_secret

    if oauth_token and oauth_token_secret:
        self.oauth_token = unicode(oauth_token)
        self.oauth_token_secret = unicode(oauth_token_secret)

    if app_key and app_secret and not oauth_token and not oauth_token_secret:
        self.auth = OAuth1(self.app_key, self.app_secret, signature_type="auth_header")

    if app_key and app_secret and oauth_token and oauth_token_secret:
        self.auth = OAuth1(self.app_key, self.app_secret, self.oauth_token,
                           self.oauth_token_secret, signature_type="auth_header")

    if self.auth is not None:
        self.client = requests.session(headers=self.headers, auth=self.auth,
                                       config=requests_config)
def get_article_url(page_list, url_list, title_list, pop_list, index_list):
    url_all = []
    title_all = []
    pop_all = []
    rs = requests.session()
    res = rs.post(ask, verify=False, data=payload)
    res = rs.get(url, verify=False)
    s = bsp(res.text, "html.parser")
    page_handle(s, url_all, title_all, pop_all)
    url_all.reverse()
    url_list.extend(url_all)
    title_all.reverse()
    title_list.extend(title_all)
    pop_all.reverse()
    pop_list.extend(pop_all)
    link = get_prev(s)
    page_list.append(link)
    for i in page_list:
        url_all = []
        title_all = []
        pop_all = []
        rs = requests.session()
        res = rs.post(ask, verify=False, data=payload)
        res = rs.get(i, verify=False)
        s = bsp(res.text, "html.parser")
        page_handle(s, url_all, title_all, pop_all)
        url_all.reverse()
        url_list.extend(url_all)
        title_all.reverse()
        title_list.extend(title_all)
        pop_all.reverse()
        pop_list.extend(pop_all)
        print('=============', i, '==============')
        for j in s.select('.btn-group-paging'):
            page_link_result = j.findAll('a', class_='btn wide')
            page_link = page_link_result[1]
            page_link = page_link['href']
            link = 'https://www.ptt.cc' + page_link
        time.sleep(0.3)
        print('Fetching ... ')
        if check_today(s) == 1:
            page_list.append(link)
    index_url(url_list, index_list)
    select_article(url_list, title_list, pop_list, index_list)
    #print_list(pop_list, title_list, url_list, index_list)
def test_entry_points(self):
    requests.session
    requests.session().get
    requests.session().head
    requests.get
    requests.head
    requests.put
    requests.patch
    requests.post
def test_install_uninstall(self):
    for _ in range(2):
        requests_cache.install_cache(name=CACHE_NAME, backend=CACHE_BACKEND)
        self.assertTrue(isinstance(requests.Session(), CachedSession))
        self.assertTrue(isinstance(requests.sessions.Session(), CachedSession))
        self.assertTrue(isinstance(requests.session(), CachedSession))
        requests_cache.uninstall_cache()
        self.assertFalse(isinstance(requests.Session(), CachedSession))
        self.assertFalse(isinstance(requests.sessions.Session(), CachedSession))
        self.assertFalse(isinstance(requests.session(), CachedSession))
def go(squadron_dir, squadron_state_dir=None, config_file=None, node_name=None,
       status_server=None, dont_rollback=False, force=False, dry_run=True):
    """
    Gets the config and applies it if it's not a dry run.

    Keyword arguments:
        squadron_dir -- where the Squadron description dir is
        squadron_state_dir -- where Squadron should store its state between runs
        config_file -- overall config file location
        node_name -- what this node is called
        status_server -- the hostname (and optionally port) of the HTTPS server to send status to
        dont_rollback -- if true, doesn't automatically rollback to the previous version
        force -- treat all files as created, always deploy
        dry_run -- whether or not to apply changes
    """
    send_status = False
    try:
        first_squadron_dir = get_squadron_dir(squadron_dir, None)
        config = parse_config(first_squadron_dir, log, config_file)
        log.debug("Got config {}".format(config))

        squadron_dir = get_squadron_dir(squadron_dir, config)

        if squadron_state_dir is None:
            squadron_state_dir = config['statedir']
        if node_name is None:
            node_name = config['nodename']

        if config['send_status'].lower() == 'true':
            send_status = True
            if status_server is None:
                status_server = config['status_host']
            status_apikey = config['status_apikey']
            status_secret = config['status_secret']
            log.info("Sending status to {} with {}/{}".format(status_server, status_apikey, status_secret))

        info = _run_squadron(squadron_dir, squadron_state_dir, node_name, dont_rollback, force, dry_run)
    except UserException as e:
        # This is a user error, don't print a stack trace
        log.error(e.message)
    except Exception as e:
        if send_status and not dry_run:
            status.report_status(requests.session(), status_server, status_apikey,
                                 status_secret, str(uuid.uuid4()), True, status='ERROR',
                                 hostname=node_name, info={'info': True, 'message': str(e)})
        log.exception('Caught exception')
        raise e
    else:
        # executes on no exception
        if send_status and not dry_run and info:
            status.report_status(requests.session(), status_server, status_apikey,
                                 status_secret, str(uuid.uuid4()), True, status='OK',
                                 hostname=node_name, info=info)
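# A hedged example of invoking go() for a dry run; all paths and names below
# are placeholders, not values from the project.
go('/etc/squadron',
   squadron_state_dir='/var/lib/squadron',
   node_name='web-01',
   dry_run=True)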
def trololol_get(ip, flag):
    s = requests.session()
    file = open(flag + '.data', 'r')
    token = file.read(10)
    payload = {'id': token}
    res = s.post("http://" + ip + HTTPORT + BASEDIR, data=payload)
    if flag in res.text:
        print "worked"
        return True
    return False
def test_session_connection_error_with_safe_mode(self):
    config = {"safe_mode": True}
    s = requests.session()
    r = s.get("http://localhost:1/nope", timeout=0.1, config=config)
    self.assertFalse(r.ok)
    self.assertTrue(r.content is None)

    s2 = requests.session(config=config)
    r2 = s2.get("http://localhost:1/nope", timeout=0.1)
    self.assertFalse(r2.ok)
    self.assertTrue(r2.content is None)
def get_base_reports(job, job_dir):
    with requests.session() as session:
        session.headers["User-Agent"] = "BHL Archive-It QA"
        hosts_report = session.get('https://partner.archive-it.org/seam/resource/report?crawlJobId=' + job + '&type=host')
        with open(join(job_dir, 'hosts.csv'), 'wb') as host_csv:
            host_csv.write(hosts_report.content)
        status_report = session.get('https://partner.archive-it.org/seam/resource/report?crawlJobId=' + job + '&type=seed')
        with open(join(job_dir, 'seedstatus.csv'), 'wb') as status_csv:
            status_csv.write(status_report.content)
        source_report = session.get('https://partner.archive-it.org/seam/resource/report?crawlJobId=' + job + '&type=source')
        with open(join(job_dir, 'seedsource.csv'), 'wb') as source_csv:
            source_csv.write(source_report.content)
def test_unpickled_session_requests(self):
    s = requests.session()
    r = get(httpbin('cookies', 'set', 'k', 'v'), allow_redirects=True, session=s)
    c = json.loads(r.text).get('cookies')
    self.assertTrue('k' in c)

    ds = pickle.loads(pickle.dumps(s))
    r = get(httpbin('cookies'), session=ds)
    c = json.loads(r.text).get('cookies')
    self.assertTrue('k' in c)

    ds1 = pickle.loads(pickle.dumps(requests.session()))
    ds2 = pickle.loads(pickle.dumps(requests.session(prefetch=False)))
    self.assertTrue(ds1.prefetch)
    self.assertFalse(ds2.prefetch)
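# The round-trip the test above relies on, as a standalone sketch (modern
# requests Sessions pickle the same way, minus the legacy prefetch option):
import pickle
import requests

s = requests.Session()
s.headers['X-Token'] = 'abc'
restored = pickle.loads(pickle.dumps(s))  # headers and cookies survive the trip
assert restored.headers['X-Token'] == 'abc'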
import logging

import requests

import pillarsdk
import pillarsdk.exceptions
import pillarsdk.utils
from pillarsdk.utils import sanitize_filename

from . import cache

SUBCLIENT_ID = 'PILLAR'
TEXTURE_NODE_TYPES = {'texture', 'hdri'}

RFC1123_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'

_pillar_api = {}  # will become a mapping from bool (cached/non-cached) to pillarsdk.Api objects.
log = logging.getLogger(__name__)
uncached_session = requests.session()
_testing_blender_id_profile = None  # Just for testing, overrides what is returned by blender_id_profile.
_downloaded_urls = set()  # URLs we've downloaded this Blender session.


class UserNotLoggedInError(RuntimeError):
    """Raised when the user should be logged in on Blender ID, but isn't.

    This is basically for every interaction with Pillar.
    """

    def __str__(self):
        return self.__class__.__name__


class CredentialsNotSyncedError(UserNotLoggedInError):
    """Raised when the user may be logged in on Blender ID, but has no Blender Cloud token."""
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Origin': 'http://vip.biancheng.net',
    'Host': 'vip.biancheng.net',
    'Referer': 'http://vip.biancheng.net',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
}

session_requests = requests.session()
login_url = "http://vip.biancheng.net/login.php"
result = session_requests.get(login_url)
# tree = html.fromstring(result.text)
# authenticity_token = list(set(tree.xpath("//input[@name='csrfmiddlewaretoken']/@value")))[0]

# username=niejn&password=cxf8922470&submit=%E7%99%BB%C2%A0%C2%A0%C2%A0%C2%A0%E5%BD%95
payload = {
    "username": "******",
    "password": "******",
    "submit": "%E7%99%BB%C2%A0%C2%A0%C2%A0%C2%A0%E5%BD%95"
}
result = session_requests.post(
    login_url,
    data=payload,
    headers=headers
)
def make_user_login(roomid):
    issue_id = 2326621
    for i in range(50):
        headers = {'Accept': 'application/json,application/xml,application/xhtml+xml,text/html;q=0.9,image/webp,*/*;q=0.8',
                   'Accept-Encoding': 'gzip, deflate',
                   'Accept-Language': 'zh-CN,zh',
                   'Authorization': 'Bearer YNnlc05P8Hj8JE6AlSgIXd2su9zD2jZ69ORY3RoIgJ3MlMVppcpwzoSl8Ayb5Cj6VKWz64bak3vZHKTRf-l5gEabU9pnsY-Y1ZpooPD-TYFye6X0-ae6bMoOeO-LwU8MKVD1JIbyEdsQIoce9BE_hAIWYCo-s4KnhGdGHA83O220oOg77dLHVPx-N4TMGSRZSvWL-K3tr1EGhp8kxwfY3w',
                   'Connection': 'keep-alive',
                   'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
                   'User-Agent': 'Mozilla/5.0 (Linux; U; Android 7.1.1; zh-cn; OS105 Build/NGI77B) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',
                   'Content-Length': '78',
                   'Host': '45.61.241.170:5566'}
        datas = {'user_name': 'b478934288',
                 'password': '123456a',
                 'client_type': '1',
                 'client_Id': 'Android/osborn/osborn:7.1.1/NGI77B/1534640972:user/dev-keys',
                 'Client_Ip': 'yiqpdRfXbR1Notuz99Oueg%3D%3D'}
        params = {'page': i, 'roomid': roomid}
        body = {'user_name': 'b478934288',
                'password': '******',
                'client_type': '1',
                'client_Id': 'Android/osborn/osborn:7.1.1/NGI77B/1534640972:user/dev-keys',
                'Client_Ip': 'yiqpdRfXbR1Notuz99Oueg%3D%3D'}
        session = requests.session()
        # url = 'http://45.61.241.170:5566/Api/User/login'
        # resp = session.post(url, data=datas, headers=headers)
        # rejson = resp.content.decode('utf-8')
        # redata = json.loads(rejson)['result']
        s = session.get('http://45.61.243.44:5566/Api/Room/GetRoomTop10Chats',
                        params=params, headers=headers, json=body)
        userdata = json.loads(s.content.decode('utf-8'))
        now_issue_id = int(userdata['result'][0]['body'].split('@')[0])
        if now_issue_id < issue_id:
            break
        else:
            issue_id = now_issue_id
        for result in userdata['result']:
            # Example chat bodies:
            #   2326621@双@20@7@2.0@204
            #   2326621@大单@33@7@4.2@208
            #   2326621@大@50@7@2.0@201
            data_split = result['body'].split('@')
            if data_split[1] == '大单':
                user_result['大单'] += int(data_split[2])
            if data_split[1] == '小单':
                user_result['小单'] += int(data_split[2])
            if data_split[1] == '大双':
                user_result['大双'] += int(data_split[2])
            if data_split[1] == '小双':
                user_result['小双'] += int(data_split[2])
            if data_split[1] == '大':
                user_result['大'] += int(data_split[2])
            if data_split[1] == '小':
                user_result['小'] += int(data_split[2])
            if data_split[1] == '单':
                user_result['单'] += int(data_split[2])
            if data_split[1] == '双':
                user_result['双'] += int(data_split[2])
            #print("{0}{1}{2}元".format(result['nickname'].center(30), data_split[1].center(20), data_split[2].center(10)))
        time.sleep(0.2)
    print(user_result)
import cookielib
import requests

cookie_jar = cookielib.LWPCookieJar()
session1 = requests.session()
session1.cookies = cookie_jar

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',
    'Host': 'tieba.baidu.com',
    'Connection': 'keep-alive',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}
response1 = session1.request(method='get', url='https://www.baidu.com', headers=headers)
response2 = session1.request(method='get', url='http://tieba.baidu.com/p/5465046117?traceid=', headers=headers)
response3 = session1.request(method='get', url='https://weibo.com')
response4 = session1.request(method='get', url='https://www.csdn.net/')
for i in session1.cookies:
    print i
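# A minimal sketch of persisting that cookie jar between runs; the filename is
# an assumption, not taken from the original script.
cookie_jar.save('cookies.txt', ignore_discard=True)

restored_jar = cookielib.LWPCookieJar('cookies.txt')
restored_jar.load(ignore_discard=True)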
def __init__(self, url):
    # super() needs both the class and the instance in Python 2.
    super(ProxyWebChecker, self).__init__()
    self._s = requests.session()
    self._s.headers.update(HEADERS)
    self._url = url
    self._time_out = 24
ascii += " ={> P R O J E C T I N S E C U R I T Y <}= \r\n"
ascii += " \r\n"
ascii += " Twitter : >@Insecurity< \r\n"
ascii += " Site : >insecurity.sh< \r\n"

green = "\033[1;32m"
red = "\033[1;31m"
clear = "\033[0m"

load = "[>$<] ".replace(">", green).replace("<", clear)
err = "[>-<] ".replace(">", red).replace("<", clear)

intro = ascii.replace(">", green).replace("<", clear)
print(intro)

with requests.session() as s:
    login = {
        "new_login_session_management": "1",
        "authProvider": "Default",
        "authUser": args.user,
        "clearPass": args.password,
        "languageChoice": "1"
    }
    print(load + "Authenticating with " + args.user + ":" + args.password)
    r = s.post(args.host + "/interface/main/main_screen.php?auth=login&site=default", data=login)
    if "login_screen.php?error=1&site=" in r.text:
        print(err + "Failed to Login.")
        sys.exit(0)
# If the fetched page is JSON data, convert it with the method below;
# if it is not JSON, the conversion raises JSONDecodeError.
import json
data = json.loads(page)
'''

import requests
import urllib
import random
from datetime import datetime
import http.cookiejar as cookielib

# A session object represents one connection.
huihuSession = requests.session()
# The stock session.cookies has no save() method, so we use cookielib's LWPCookieJar;
# a cookie jar created from that class can call save() directly.
huihuSession.cookies = cookielib.LWPCookieJar(filename="huihuCookies.txt")

userAgent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36"
header = {
    # "origin": "https://passport.huihu.cn",
    "Referer": "http://hh.haiper.com.cn/w/wander/user/login/",
    'User-Agent': userAgent,
}


def login(account, password):
    # print("Starting the simulated login")
    postUrl = "http://192.168.2.107/otcwms"
def __init__(self):
    self.base_url = "http://match.yuanrenxue.com/api/match/4"
    self.jm_js = "http://127.0.0.1:3000/"
    self.session = requests.session()
aMySQLcrptmktDB.m_sMarketNameAbbr = g_sMarketNameAbbr
aMySQLcrptmktDB.m_nScrapingFreqDaysProductDesc = g_nScrapingFreqDaysProductDesc
aMySQLcrptmktDB.m_nScrapingFreqDaysProductRating = g_nScrapingFreqDaysProductRating
aMySQLcrptmktDB.m_nScrapingFreqDaysVendorProfile = g_nScrapingFreqDaysVendorProfile
aMySQLcrptmktDB.m_nScrapingFreqDaysVendorRating = g_nScrapingFreqDaysVendorRating
aMySQLcrptmktDB.MySQLQueryBasicInfor()
g_nMarketGlobalID = aMySQLcrptmktDB.m_nMarketGlobalID
if g_bUseDefaultUsernamePasswd:
    g_sMarketUserName = aMySQLcrptmktDB.m_sMarketUserName
    g_sMarketPassword = aMySQLcrptmktDB.m_sMarketPassword

aBrowserDriver = selenium_setup_firefox_network()
Login(aBrowserDriver)

# Pass cookies from selenium to url requests.
# In future we can save these in db and use them until they expire.
loginSession = requests.session()
for cookie in aBrowserDriver.get_cookies():
    loginSession.cookies.update({cookie['name']: cookie['value']})

# Add New Products to product_list and vendors to vendors_list
for cat_code in g_startIndexes:
    categoryLink = g_sMarketURL + cat_code
    catContent = loginSession.get(categoryLink, proxies=proxies).content
    paginationDiv = list(map(lambda x: x.text,
                             BeautifulSoup(catContent, 'html.parser').find_all('li', class_='page-item')))
    pageIndices = list(map(int, list(filter(lambda x: x.isdigit(), paginationDiv))))
def __init__(self, log_response=False):
    self._r = session()
    self._log_response = log_response
proxy_list = [
    {
        'https': '101.81.10.171:39522',
        'http': '114.217.3.70:808',
    },
    {
        'https': '119.1.97.193:60916',
        'http': '61.138.33.20:808',
    },
]
proxies = random.choice(proxy_list)

data = {
    'email': '18323291912',
    'password': '******',
}

session = requests.session()
# Make the first request (proxies must be passed as a keyword argument).
req1 = session.post(url, data=data, proxies=proxies)
with open('renren1.html', 'w', encoding='utf-8') as f:
    f.write(req1.text)

# Second request, reusing the logged-in session.
req2 = session.get(url='http://www.renren.com/896754064', proxies=proxies)
# req = requests.post(url, data, proxies)
with open('renren2.html', 'w', encoding='utf-8') as f:
    f.write(req2.text)
def __init__(self):
    self.url = 'https://www.toutiao.com/api/pc/feed/'
    self.jm_js = "http://127.0.0.1:3000/"
    self.session = requests.session()
def setup(self):
    _LOGGER.info("setup()")
    self._client = requests.session()
    self._stoves = self.setup_stoves()
def __init__(self, credentials: PremiumCredentials):
    self.status = SubscriptionStatus.UNKNOWN
    self.session = requests.session()
    self.apiversion = '1'
    self.uri = 'https://rotki.com/api/{}/'.format(self.apiversion)
    self.reset_credentials(credentials)
def playVideo(self):
    #
    # Init
    #
    no_url_found = False
    unplayable_media_file = False
    have_valid_url = False

    #
    # Get current list item details...
    #
    # title = unicode(xbmc.getInfoLabel("listitem.Title"), "utf-8")
    thumbnail_url = xbmc.getInfoImage("list_item.Thumb")
    # studio = unicode(xbmc.getInfoLabel("list_item.Studio"), "utf-8")
    plot = unicode(xbmc.getInfoLabel("list_item.Plot"), "utf-8")
    genre = unicode(xbmc.getInfoLabel("list_item.Genre"), "utf-8")

    #
    # Show wait dialog while parsing data...
    #
    dialog_wait = xbmcgui.DialogProgress()
    dialog_wait.create(LANGUAGE(30504), self.title)
    # wait 1 second
    xbmc.sleep(1000)

    # Set cookies for cookie-firewall and nsfw-switch
    if SETTINGS.getSetting('nsfw') == 'true':
        cookies = {"Cookie": "cpc=10", "nsfw": "1"}
    else:
        cookies = {"Cookie": "cpc=10"}

    # Make a session
    sess = requests.session()

    # Determine if cloudflare protection is active or not
    html_source = sess.get(self.video_page_url, cookies=cookies).text
    if str(html_source).find("cloudflare") >= 0:
        cloudflare_active = True
    else:
        cloudflare_active = False

    # Get the page
    if cloudflare_active:
        try:
            import cfscrape
        except:
            xbmcgui.Dialog().ok(LANGUAGE(30000), LANGUAGE(30513))
            sys.exit(1)
        try:
            # returns a CloudflareScraper instance
            scraper = cfscrape.create_scraper(sess)
        except:
            xbmcgui.Dialog().ok(LANGUAGE(30000), LANGUAGE(30514))
            sys.exit(1)
        try:
            html_source = scraper.get(self.video_page_url).content
        except:
            xbmcgui.Dialog().ok(LANGUAGE(30000), LANGUAGE(30515))
            sys.exit(1)

    soup = BeautifulSoup(html_source)
    video_url = ''

    # <div class="videoplayer" id="video1" data-files="eyJmbHYiOiJodHRwOlwvXC9tZWRpYS5kdW1wZXJ0Lm5s..."></div></div>
    video_urls = soup.findAll('div', attrs={'class': re.compile("video")}, limit=1)
    if len(video_urls) == 0:
        no_url_found = True
    else:
        video_url_enc = video_urls[0]['data-files']
        # base64 decode
        video_url_dec = str(base64.b64decode(video_url_enc))
        # {"flv":"http:\/\/media.dumpert.nl\/flv\/...flv","tablet":"...mp4","mobile":"...mp4","720p":"...mp4","still":"..."}
        # or
        # {"embed":"youtube:U89fl5fZETE","still":"http:\/\/static.dumpert.nl\/stills\/6650228_24eed546.jpg"}
        xbmc.log("[ADDON] %s v%s (%s) debug mode, %s = %s" % (
            ADDON, VERSION, DATE, "video_url_dec", str(video_url_dec)), xbmc.LOGDEBUG)

        # convert string to dictionary
        video_url_dec_dict = ast.literal_eval(video_url_dec)

        video_url_embed = ''
        try:
            video_url_embed = str(video_url_dec_dict['embed'])
            embed_found = True
        except KeyError:
            embed_found = False

        video_url = ''
        if embed_found:
            # make youtube plugin url
            youtube_id = video_url_embed.replace("youtube:", "")
            youtube_url = 'plugin://plugin.video.youtube/play/?video_id=%s' % youtube_id
            video_url = youtube_url
            have_valid_url = True
            xbmc.log("[ADDON] %s v%s (%s) debug mode, %s = %s" % (
                ADDON, VERSION, DATE, "video_url1", str(video_url)), xbmc.LOGDEBUG)
        else:
            # matching the desired and available quality
            if self.VIDEO == '0':
                try:
                    video_url = str(video_url_dec_dict['mobile'])
                except KeyError:
                    no_url_found = True
            elif self.VIDEO == '1':
                try:
                    video_url = str(video_url_dec_dict['tablet'])
                except KeyError:
                    try:
                        video_url = str(video_url_dec_dict['mobile'])
                    except KeyError:
                        no_url_found = True
            elif self.VIDEO == '2':
                try:
                    video_url = str(video_url_dec_dict['720p'])
                except KeyError:
                    try:
                        video_url = str(video_url_dec_dict['tablet'])
                    except KeyError:
                        try:
                            video_url = str(video_url_dec_dict['mobile'])
                        except KeyError:
                            no_url_found = True

            if no_url_found:
                pass
            else:
                video_url = video_url.replace('\/', '/')
                xbmc.log("[ADDON] %s v%s (%s) debug mode, %s = %s" % (
                    ADDON, VERSION, DATE, "video_url2", str(video_url)), xbmc.LOGDEBUG)
                # The need for speed: let's guess that the video-url exists
                have_valid_url = True

    # Play video...
    if have_valid_url:
        list_item = xbmcgui.ListItem(path=video_url)
        xbmcplugin.setResolvedUrl(self.plugin_handle, True, list_item)
    #
    # Alert user
    #
    elif no_url_found:
        xbmcgui.Dialog().ok(LANGUAGE(30000), LANGUAGE(30505))
    elif unplayable_media_file:
        xbmcgui.Dialog().ok(LANGUAGE(30000), LANGUAGE(30506))
def get_book_by_isbn(book_data, file_writer):
    with requests.session() as s:
        plu_key = get_first_plu_key()
        plu_title = ''
        book_title = ''
        isbn = ''
        if len(book_data) == 1:
            if re.search(r'[0-9\-]', book_data[0]):
                isbn = book_data[0].strip().decode('utf8')
            else:
                book_title = book_data[0].strip().decode('utf8')
                plu_title = book_data[0].strip().decode('utf8')
        elif len(book_data) > 1:
            chn_punctuation = u'_·!?。"#$%&\'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—\'\'‛""„‟…‧﹏.'
            book_title = book_data[0].strip().decode('utf8')
            plu_title = re.sub(ur'[%s%s]+' % (chn_punctuation, string.punctuation), ' ',
                               book_data[0].decode('utf8'))
            print book_title
            isbn = book_data[1].strip().decode('utf8')
        res = False
        count = 0
        row_data = [book_title, isbn]
        book_info = {'store': '', 'sale': ''}
        while not res and count < 2:
            data = {'plu_title': plu_title, 'plu_key': plu_key, 'B1': u'图书搜索'}
            resp = s.post(search_url, data=data)
            if resp.status_code == 200:
                count += 1
                html_doc = resp.content
                soup = BeautifulSoup(html_doc, 'html.parser')
                hrefs = soup.find_all('a')
                book_hrefs = filter(lambda x: x['href'].startswith('views.asp?plucode'), hrefs)
                if len(book_hrefs) == 0:
                    if len(book_data) == 1:
                        break
                    plu_title = isbn
                    if count != 2:
                        continue
                bhref = ''
                if len(book_hrefs) > 1:
                    for item in book_hrefs:
                        if item.text.strip() == book_title:
                            bhref = item.get('href')
                if len(book_hrefs) == 1:
                    bhref = book_hrefs[0].get('href')
                    if not book_title:
                        print book_hrefs[0].text
                        row_data[0] = book_hrefs[0].text
                book_info = get_book_info(bhref)
                res = True
        try:
            dict_data = {
                u'书名': row_data[0],
                u'ISBN': row_data[1],
                u'库存信息': book_info.get('store'),
            }
            sale_info = book_info.get('sale').split(')')
            for si in sale_info:
                kv = si.split(':')
                if len(kv) > 1:
                    dict_data.update({kv[0].strip(): re.sub(r'[()()]+', '', kv[1])})
            row_data.extend(book_info.values())
            file_writer.writerow({k.encode('utf8'): v.encode('utf8')
                                  for k, v in dict_data.iteritems()})
        except:
            print row_data
            import traceback
            traceback.print_exc()
def main():
    # Receive the data returned by read_data()
    ID, password, mailID = read_data()
    for i in range(len(ID)):
        # This payload is needed to access the login page
        data = {
            'uname': ID[i],
            'pd_mm': password[i]
        }
        req = requests.session()
        # Send a POST request to the login page
        recv = req.post(login_url, headers=headers, data=data).text
        # Convert the str to JSON, i.e. a dict
        recv = json.loads(recv)
        # Send a GET request to the page holding the previous check-in data
        last_data = req.get(url=last_data_url, headers=headers).text
        print(last_data)
        # Convert to a dict
        dic = json.loads(last_data)
        # The payload for the final check-in
        data = {
            'operationType': 'Create',
            'sfzx': dic['sfzx'],
            'jzdSheng.dm': dic['jzdSheng']['dm'],
            'jzdShi.dm': dic['jzdShi']['dm'],
            'jzdXian.dm': dic['jzdXian']['dm'],
            'jzdDz': dic['jzdDz'],
            'jzdDz2': dic['jzdDz2'],
            'lxdh': dic['lxdh'],
            'tw': dic['tw'],
            'bz': dic['bz'],
            'dm': None,
            'brJccry.dm': dic['brJccry']['dm'],
            'brJccry1': dic['brJccry']['mc'],
            'brStzk.dm': dic['brStzk']['dm'],
            'brStzk1': dic['brStzk']['mc'],
            'dkd': dic['dkd'],
            'dkdz': dic['dkdz'],
            'dkly': dic['dkly'],
            'hsjc': dic['hsjc'],
            'jkm': dic['jkm'],
            'jrJccry.dm': dic['jrJccry']['dm'],
            'jrJccry1': dic['jrJccry']['mc'],
            'jrStzk1': dic['jrStzk']['mc'],
            'jrStzk.dm': dic['jrStzk']['dm'],
            'jzInd': dic['jzInd'],
            'tw1': dic['twM']['mc'],
            'twM.dm': dic['twM']['dm'],
            'xcm': dic['xcm'],
            'xgym': dic['xgym'],
            'yczk.dm': dic['yczk']['dm'],
            'yczk1': dic['yczk']['mc']
        }
        # The login response contains something like wap/main/welcome?_t_s_=1602909162783;
        # the _t_s_=1602909162783 part is what we need
        add_url_data = recv['goto2'].replace('wap/main/welcome', '')
        print(add_url_data)
        # Request headers for submitting the check-in
        headers1 = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'Connection': 'keep-alive',
            'Host': 'xggl.hnie.edu.cn',
            'user-agent': 'Mozilla / 5.0(Linux;Android 4.0.4;Galaxy Nexus Build / IMM76B) AppleWebKit / 535.19(KHTML, like Gecko)' +
                          'Chrome / 18.0.1025.133 Mobile Safari / 535.19',
            'Referer': 'http://xggl.hnie.edu.cn/content/menu/student/temp/zzdk' + add_url_data,
            'Origin': 'http://xggl.hnie.edu.cn',
        }
        # Check-in submission URL; the _t_s_=... suffix must be appended
        clock_url = 'http://xggl.hnie.edu.cn/content/student/temp/zzdk' + add_url_data
        # Get the response after checking in
        clock_over_data = req.post(url=clock_url, headers=headers1, data=data).text
        # Convert the check-in response to JSON
        clock_over_data1 = json.loads(clock_over_data)
        print(clock_over_data1)
        # This text holds the message for the notification e-mail
        text = ''
        # Get the current time
        now_time = time.localtime()
        if clock_over_data1['result']:
            # Check-in succeeded; notify the user
            text = '易班打卡' + '\nhello ' + str(ID[i]) + '\n' + '你的易班已经打卡成功' + '\n' + \
                   '{:d}年{:d}月{:d}日'.format(now_time.tm_year, now_time.tm_mon, now_time.tm_mday)
            print('ID' + str(ID[i]) + '已经打卡!!')
        else:
            # Check-in failed; extract the failure message
            error_message = re.compile('"message":"(.*?)"')
            message = re.findall(error_message, clock_over_data)[0]
            # Notify the user of the failure and its reason
            text = '易班打卡' + '\nhello ' + str(ID[i]) + '\n' + '你的易班打卡失败' + '\n原因:' + message + '\n' + \
                   '{:d}年{:d}月{:d}日'.format(now_time.tm_year, now_time.tm_mon, now_time.tm_mday)
            print('ID' + str(ID[i]) + '打卡失败')
        # Send the e-mail
        to_meg(text, mailID[i])
def __init__(self, session: Optional[requests.Session] = None):
    self._session = session if session is not None else requests.session()
    self._set_retry_strategy()
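# _set_retry_strategy() is not shown above; a plausible sketch using urllib3's
# Retry with HTTPAdapter (the counts and status codes here are assumptions):
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def _set_retry_strategy(self):
    # Retry transient server errors with exponential backoff.
    retry = Retry(total=3, backoff_factor=0.5,
                  status_forcelist=(500, 502, 503, 504))
    adapter = HTTPAdapter(max_retries=retry)
    self._session.mount('http://', adapter)
    self._session.mount('https://', adapter)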
def __init__(self, token, userId):
    self.userId = userId
    self.token = token
    self.session = requests.session()
def __init__(self):
    self.Talk = Talk()
    self._session = requests.session()
import datetime
import re
import requests
import threading
import Queue
from conn_psql import conn_psql
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup as bs
from mail import send_mail

create_time = datetime.datetime.now().strftime("%Y-%m-%d")
max_request = 3
max_time = 3
queue = Queue.Queue()
request_url = requests.session()
request_url.mount('http://', HTTPAdapter(max_retries=max_request))
request_url.mount('https://', HTTPAdapter(max_retries=max_request))
message = []


def get_data(url):
    try:
        page = request_url.get(url, timeout=max_time)
        soup = bs(page.content, "lxml")
        table = soup.find("table", {"style": "margin: 20px auto 0px auto"})
        trs = table.findAll("tr", {"name": "white"})
        for tr in trs:
            tds = tr.findAll("td")
            sql = "INSERT INTO person(nid, name, code, orgnization, province,\
                   credit, create_time) values ('%s', '%s', '%s', '%s', '%s',\
class Login(object):
    UA = "Line/8.2.1"
    LA = "IOSIPAD\t8.2.1\tTanysyz\t11.2.5"
    auth_query_path = "/api/v4/TalkService.do"
    http_query_path = '/S4'
    login_query_path = "/api/v4p/rs"
    wait_for_mobile_path = "/Q"
    host = "https://gd2.line.naver.jp"
    port = 443
    _session = requests.session()
    _headers = {}
    com_name = ''
    headers = {
        "User-Agent": UA,
        "X-Line-Application": LA,
        "X-LHM": "POST",
        "x-lal": "ja-JP_JP"
    }

    def call(callback):
        print(callback)

    def __init__(self, sid=None, password=None, callback=call, uke=None, com_name='Library T'):
        UA = "Line/8.2.1"
        LA = "IOSIPAD\t8.2.1\tTanysyz\t11.2.5"
        self._headers['User-Agent'] = UA
        self._headers['X-Line-Application'] = LA
        self.userid = sid
        self.password = password
        self.callback = callback
        self.pcname = com_name
        self.uke = uke
        self.login()

    def login(self):
        self.transport = THttpClient.THttpClient(self.host + ":" + str(self.port) + self.http_query_path)
        self.transport.setCustomHeaders(self.headers)
        self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
        self.client = LineService.Client(self.protocol)
        self.transport.open()
        self.transport.path = self.auth_query_path
        r = self.client.getRSAKeyInfo(IdentityProvider.LINE)
        data = (chr(len(r.sessionKey)) + r.sessionKey +
                chr(len(self.userid)) + self.userid +
                chr(len(self.password)) + self.password)
        pub = rsa.PublicKey(int(r.nvalue, 16), int(r.evalue, 16))
        cipher = rsa.encrypt(data, pub).encode('hex')

        login_request = loginRequest()
        login_request.type = 0
        login_request.identityProvider = IdentityProvider.LINE
        login_request.identifier = r.keynm
        login_request.password = cipher
        login_request.keepLoggedIn = 1
        login_request.accessLocation = "127.0.0.1"
        login_request.systemName = self.pcname
        login_request.e2eeVersion = 1

        self.transport.path = self.login_query_path
        r = self.client.loginZ(login_request)
        if r.type == LoginResultType.SUCCESS:
            self.setAttr(r.authToken, self.certificate)
        elif r.type == LoginResultType.REQUIRE_QRCODE:
            pass
        elif r.type == LoginResultType.REQUIRE_DEVICE_CONFIRM:
            print("Enter this PIN code: {}".format(r.pinCode))
            verifier = requests.get(url=self.host + self.wait_for_mobile_path,
                                    headers={"X-Line-Access": r.verifier}).json()["result"]["verifier"].encode("utf-8")
            verifier_request = loginRequest()
            verifier_request.type = 1
            verifier_request.verifier = verifier
            verifier_request.e2eeVersion = 1
            r = self.client.loginZ(verifier_request)
            self.uke('%s,%s' % (r.certificate, r.authToken))
        else:
            print("Error {}".format(r.type))
def sync_meta_status():
    args = docopt(__doc__)
    config_data = json.loads(open(args.get('<config>'), 'r').read())
    (login_url, username, password, get_study_url, get_bnid_url, db_user, db_pw, db_host,
     database, post_meta_url, set_status_url, check_status_url, vflag) = (
        config_data['login_url'], config_data['username'], config_data['password'],
        config_data['urlGetStudy'], config_data['urlGetBnid'], config_data['dbUser'],
        config_data['dbPw'], config_data['dbHost'], config_data['db'],
        config_data['postMetaUrl'], config_data['setStatusUrl'],
        config_data['checkStatusUrl'], config_data['vflag'])
    post_client = requests.session()
    (post_csrftoken, post_cookies, post_headers) = set_web_stuff(post_client, login_url, vflag)
    login_data = dict(username=username, password=password)
    r = post_client.post(login_url, login_data, cookies=post_cookies, headers=post_headers)
    # get list of studies to check bionimbus web for
    if r.status_code == 200:
        sys.stderr.write('Successfully logged in\n')
    else:
        sys.stderr.write('Login failed for url ' + login_url + '\n got error code ' + str(r.status_code) + '\n')
        exit(1)
    # query bionimbus web for all with project and sub project (study)
    study_info = post_client.get(get_study_url, params=login_data)
    if study_info.status_code != 200:
        sys.stderr.write('Lookup request failed. Check cookies and tokens and try again\n')
        exit(1)
    con = db_connect(database, db_user, db_pw, db_host)
    # dict of rows to add to mimic sheet submission
    to_add = {'sheet': []}
    # dict for setting date of submission
    date_dict = {}
    # dict to make sure existing metadata has an entry in the status database
    to_check = {}
    for key in study_info.json():
        # adding pk for study to leverage metadata lookup function get_bnid_by_study
        entries = query_bionimbus_web(con, key)
        if len(entries) > 0:
            (to_add, date_dict, to_check) = check_variant_viewer(entries, study_info.json()[key],
                                                                 login_data, get_bnid_url, post_client,
                                                                 to_add, date_dict, to_check)
    # populate variant viewer with metadata for relevant studies if not populated already
    if len(to_add['sheet']) > 0:
        (post_csrftoken, post_cookies, post_headers) = set_web_stuff(post_client, login_url, vflag)
        # post_headers.update({'name': 'sheet'})
        check = post_client.post(post_meta_url, data=json.dumps(to_add), headers=post_headers,
                                 cookies=post_cookies, allow_redirects=False)
        if check.status_code == 500:
            sys.stderr.write('Adding new metadata failed!\n')
            exit(1)
        sys.stderr.write('Created new entries in variant viewer\n')
        # set variant viewer status to "submitted for sequencing" for newly added entries
        ct = 0
        for new_entry in to_add['sheet']:
            ct += 1
            bnid = new_entry[2]
            # date_dict has datetime objects, need to convert to str
            sub_date = str(date_dict[bnid])
            status = 'Sample submitted for sequencing'
            check = update_status(bnid, sub_date, post_client, login_url, set_status_url,
                                  'submit_date', status, vflag)
            if check != 0:
                sys.stderr.write('Could not set seq status')
                exit(1)
        sys.stderr.write(str(ct) + ' samples added to metadata database\n')
    else:
        sys.stderr.write('No new metadata to add. All up to date!\n')
    if len(to_check) > 0:
        ct = 0
        for bnid in to_check:
            check = check_status(bnid, post_client, login_url, check_status_url, vflag)
            if check == 'No status':
                ct += 1
                status = 'Sample submitted for sequencing'
                success = update_status(bnid, str(to_check[bnid]), post_client, login_url,
                                        set_status_url, 'submit_date', status, vflag)
                if success != 0:
                    sys.stderr.write('Could not update submit status for ' + bnid + '\n')
                    exit(1)
        sys.stderr.write(str(ct) + ' additional statuses updated\n')
def _init_session(self):
    session = requests.session()
    session.headers = self.get_headers()
    return session
def close(self):
    requests.session().close()
def __init__(self, user=None):
    """init Base

    :param user: User info
    """
    self._user = user if user is not None else User('.thu')
    self._session = requests.session()
def get_images_info(url_master, url):
    """
    This function is responsible for accessing each url from the list we created
    using the list_of_links function and scraping all the important information
    about the product in a dict format, as well as saving its image.
    """
    my_session = requests.session()
    cookies = ''
    while cookies == '':
        try:
            cookies = my_session.get(url_master).cookies
            break
        except:
            time.sleep(5)
            print("Try a Connection...")
            continue
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    response = my_session.get(url, headers=headers, cookies=cookies)
    if response.status_code != 200:
        return "ERROR IN THE STATUS CODE, {}".format(response.status_code)
    else:
        page = response.text
        soup = BeautifulSoup(page, "lxml")
        # Product Information:
        try:
            unavailable = soup.find(class_='unavailable-image-message').text
        except:
            unavailable = 0
        if unavailable == 0:
            web_id = url.split('ID=')
            web_id = web_id[1].split('&')[0]
            store = "Bloomingdale's"
            brand = soup.find(class_='brand-name-link product-title-no-transform h2 b-breakword').text.replace("\n", "").split()
            brand = " ".join(brand)
            price = soup.find('div', class_='price').text.replace("\n", "").split()
            price = " ".join(price)
            color = soup.find('span', class_='color-display-name').text.replace("\n", "").replace("Color: ", "").split()
            color = " ".join(color)
            name = soup.find('h1', class_='text c-no-bold product-title-no-transform b-breakword').text.replace("\n", "").split()
            name = " ".join(name)
            description = name
            product_type = soup.find(class_='breadcrumbs-panel no-bullet').text.replace("\n", "").split()[-1]
            headers = ['product_id', 'product_url', 'store', 'type', 'name', 'price', 'description', 'color', 'brand']
            product_dict = dict(zip(headers, [[web_id], [url], [store], [product_type], [name], [price],
                                              [description], [color], [brand]]))
            # Saving the image:
            img_url = soup.find("meta", property="og:image")["content"]
            urllib.request.urlretrieve(img_url, os.path.basename(web_id + ".jpg"))
            return product_dict
#!/usr/bin/python3
"""
Write a script or a program that votes 4096 times for your id here:
http://158.69.76.135/level1.php
"""
import requests

url = "http://158.69.76.135/level1.php"
with requests.session() as client:
    for cont in range(4096):
        key = client.get(url)
        # dict_key = key.cookies.get_dict()
        data = {"id": 1992,
                "holdthedoor": "submit",
                "key": key.cookies.get_dict()['HoldTheDoor']}
        client.post(url, data)
def __init__(self):
    self.session = requests.session()
entrez_timer = entrez_timer_gen()


def entrez_fetch(db, **kwargs):
    """
    Interface to Entrez.efetch that reads email from `ENTREZ_EMAIL` environment
    variable and automatically rate limits to three requests per second (per
    https://www.ncbi.nlm.nih.gov/books/NBK25497/#chapter2.Usage_Guidelines_and_Requiremen).
    """
    next(entrez_timer)
    return Entrez.efetch(db, **kwargs)


sesh = requests.session()


def login(rosalind_user, rosalind_password):
    """
    Logs into http://rosalind.info/, reading credentials from `ROSALIND_USER`
    and `ROSALIND_PASSWORD` environment variables.
    """
    response = sesh.get('http://rosalind.info/accounts/login/')
    payload = {
        'username': rosalind_user,
        'password': rosalind_password,
        'next': '',
        'csrfmiddlewaretoken': response.cookies['csrftoken']
    }
    sesh.post('http://rosalind.info/accounts/login/', data=payload)
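# A hedged usage sketch of the two helpers above; the accession, record type,
# and environment variables are placeholders.
import os

login(os.environ['ROSALIND_USER'], os.environ['ROSALIND_PASSWORD'])
handle = entrez_fetch(db='nucleotide', id='NM_000518', rettype='fasta', retmode='text')
print(handle.read())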