def __init__(self, pool_connections=DEFAULT_POOLSIZE,
             pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
             pool_block=DEFAULT_POOLBLOCK):
    if max_retries == DEFAULT_RETRIES:
        self.max_retries = Retry(0, read=False)
    else:
        self.max_retries = Retry.from_int(max_retries)
    self.config = {}
    self.proxy_manager = {}

    super(HTTPAdapter, self).__init__()

    self._pool_connections = pool_connections
    self._pool_maxsize = pool_maxsize
    self._pool_block = pool_block
    self._pool_kw_lock = RLock()

    self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
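
# Usage sketch (an assumption, not part of the excerpt above): an int passed
# as max_retries goes through Retry.from_int, while the default collapses to
# Retry(0, read=False), i.e. no retries at all.
import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
session.mount('https://', HTTPAdapter())              # default: no retries
session.mount('http://', HTTPAdapter(max_retries=3))  # up to 3 retries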
def run(self):
    from requests.packages.urllib3.util.retry import Retry
    from requests.adapters import HTTPAdapter

    headers = {'User-Agent': 'yobot/1.0'}
    retries = Retry.from_int(0)
    s = requests.Session()
    s.mount('http://', HTTPAdapter(max_retries=retries))
    s.mount('https://', HTTPAdapter(max_retries=retries))
    qDebug(str(self.urls).encode())
    for url in self.urls:
        try:
            resp = s.get(url, headers=headers, timeout=1.2)
            qDebug('{}, {}'.format(resp.status_code, resp.encoding).encode())
            self.resps[url] = resp
        except Exception as ex:
            qDebug(str(ex).encode())
            self.resps[url] = None
    return self.seq
import sys

from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from requests.packages.urllib3.util.retry import Retry
from pkg_resources import parse_version

from franz.openrdf.util.strings import to_native_string

# Public symbols
__all__ = ['makeRequest']

# Size of the buffer used to read responses
BUFFER_SIZE = 4096

# Configure a retry strategy similar to what the curl backend does
retries = Retry(
    backoff_factor=0.1,
    connect=10,              # 10 retries for connection-level errors
    status_forcelist=(),     # Retry only on connection errors
    method_whitelist=False)  # Retry on all methods, even POST and PUT

# We'll want to know if something contains unicode
if sys.version_info >= (3, 0):
    unicode_type = str
else:
    unicode_type = unicode


# Never check any hostnames
class HostNameIgnoringAdapter(HTTPAdapter):
    """
    A simple transport adapter that disables hostname verification for SSL.
    """
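    # A sketch of a possible body (an assumption; the excerpt ends at the class
    # docstring): the usual urllib3 pattern rebuilds the pool manager with
    # assert_hostname=False so certificate hostname checks are skipped.
    def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       assert_hostname=False,
                                       **pool_kwargs)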
import os
import platform
import subprocess
import json

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from bs4 import BeautifulSoup

# seconds
DEFAULT_TIMEOUT = 5

retry_strategy = Retry(
    total=5,
    backoff_factor=1,
    status_forcelist=[429, 500, 502, 503, 504],  # http://httpstat.us/
    method_whitelist=["HEAD", "GET", "OPTIONS"])


# To set a default timeout parameter for our scraper
# GS => General Scraper
# Refer: https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/#request-hooks
class GSTimeOutHTTPAdapter(HTTPAdapter):
    def __init__(self, *args, **kwargs):
        self.timeout = DEFAULT_TIMEOUT
        if "timeout" in kwargs:
            self.timeout = kwargs["timeout"]
            del kwargs["timeout"]
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        timeout = kwargs.get("timeout")
        if timeout is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
except AttributeError:
    geofence_username = settings.OGC_SERVER['default']['USER']

try:
    geofence_password = settings.GEOFENCE['password']
except AttributeError:
    geofence_password = settings.OGC_SERVER['default']['PASSWORD']

internal_geofence = settings.OGC_SERVER['default']['LOCATION'] in geofence_url

http_client = requests.session()
http_client.verify = True
parsed_url = urlparse(geofence_url)
retry = Retry(
    total=4,
    backoff_factor=0.9,
    status_forcelist=[502, 503, 504],
    method_whitelist=set(['HEAD', 'TRACE', 'GET', 'PUT', 'POST', 'OPTIONS', 'DELETE']))
http_client.mount("{}://".format(parsed_url.scheme), HTTPAdapter(max_retries=retry))

logger = logging.getLogger("geonode.security.models")


def http_request(url, data=None, method='get', headers={}, access_token=None):
    req_method = getattr(http_client, method.lower())
    resp = None

    if access_token:
        headers['Authorization'] = "Bearer {}".format(access_token)

    parsed_url = urlparse(url)
def get_chorus_data(starting_offset=0, agency_id=None):
    requests_session = requests.Session()
    retries = Retry(total=10, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])
    requests_session.mount('http://', DelayedAdapter(max_retries=retries))
    requests_session.mount('https://', DelayedAdapter(max_retries=retries))

    agencies = get_chorus_agencies()
    for agency in agencies:
        if agency_id:
            if int(agency["Agency_Id"]) != int(agency_id):
                print "skipping {}, you are not the agency id we are looking for".format(agency["Agency_Id"])
                continue
        if starting_offset:
            offset = starting_offset
        else:
            offset = 0
        logger.info(u"*** on agency {}:{}".format(agency["Agency_Name"], agency["Agency_Id"]))

        url_template = "https://api.chorusaccess.org/v1.1/agencies/{agency_id}/histories/current?category=publicly_accessible&limit={limit}&offset={offset}"
        limit = 50
        total_results = None

        while total_results is None or offset < total_results:
            loop_start = time()
            url = url_template.format(agency_id=agency["Agency_Id"], offset=offset, limit=limit)
            print url
            try:
                r = requests_session.get(url, timeout=360)  # wait for up to 6 minutes
            except Exception, e:
                logger.exception(u"Exception: {}, skipping".format(unicode(e.message).encode("utf-8")))
                r = None

            print u"api call elapsed: {} seconds".format(elapsed(loop_start, 1))
            offset += limit

            if r:
                data = r.json()
                total_results = data["total_results"]
                logger.info(u"Has {} total results, {} remaining".format(
                    total_results, total_results - offset))
                items = data["items"]
                new_objects = []
                for item in items:
                    if item["DOI"]:
                        doi = clean_doi(item["DOI"])
                        new_objects.append(Chorus(id=doi, raw=item))

                ids_already_in_db = [
                    id_tuple[0] for id_tuple in
                    db.session.query(Chorus.id).filter(Chorus.id.in_([obj.id for obj in new_objects])).all()
                ]
                objects_to_add_to_db = [obj for obj in new_objects if obj.id not in ids_already_in_db]
                if objects_to_add_to_db:
                    logger.info(u"adding {} items".format(len(objects_to_add_to_db)))
                    db.session.add_all(objects_to_add_to_db)
                    safe_commit(db)
                else:
                    logger.info(u"all of these items already in db")

            logger.info(u"sleeping for 2 seconds")
            sleep(2)
def get_session(self, *, state=None,
                redirect_uri=OAUTH_REDIRECT_URL,
                load_token=False,
                scopes=None):
    """ Create a requests Session object

    :param str state: session-state identifier to rebuild OAuth session (CSRF protection)
    :param str redirect_uri: callback URL specified in previous requests
    :param list(str) scopes: list of scopes we require access to
    :param bool load_token: load and ensure token is present
    :return: A ready to use requests session, or a rebuilt in-flow session
    :rtype: OAuth2Session
    """
    client_id, _ = self.auth

    if self.auth_flow_type == 'authorization':
        oauth_client = WebApplicationClient(client_id=client_id)
    elif self.auth_flow_type == 'credentials':
        oauth_client = BackendApplicationClient(client_id=client_id)
    else:
        raise ValueError(
            '"auth_flow_type" must be either "authorization" or "credentials"'
        )

    requested_scopes = scopes or self.scopes

    if load_token:
        # gets a fresh token from the store
        token = self.token_backend.get_token()
        if token is None:
            raise RuntimeError('No auth token found. Authentication Flow needed')

        oauth_client.token = token
        if self.auth_flow_type == 'authorization':
            requested_scopes = None  # the scopes are already in the token (not if type is backend)
        session = OAuth2Session(client_id=client_id,
                                client=oauth_client,
                                token=token,
                                scope=requested_scopes)
    else:
        session = OAuth2Session(client_id=client_id,
                                client=oauth_client,
                                state=state,
                                redirect_uri=redirect_uri,
                                scope=requested_scopes)

    session.proxies = self.proxy

    if self.request_retries:
        retry = Retry(total=self.request_retries,
                      read=self.request_retries,
                      connect=self.request_retries,
                      backoff_factor=RETRIES_BACKOFF_FACTOR,
                      status_forcelist=RETRIES_STATUS_LIST)
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('http://', adapter)
        session.mount('https://', adapter)

    return session
Options:
  --target_file <target_file>    File to process
  --output_file <output_file>    Where to save the file with the orcid ids
"""
from docopt import docopt

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

s = requests.Session()
retries = Retry(total=5, backoff_factor=1)
s.mount('https://', HTTPAdapter(max_retries=retries))

min_abstract_length = 225
MAX_PUBMED_ID = int(25 * 1e6)
MAX_PMC_ID = 6411461


def pmc_url(pmc_id):
    return 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pmc&id={}&retmode=xml'.format(pmc_id)


def pubmed_url(pubmed_id):
    url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={}&retmode=xml'.format(pubmed_id)
    return url


def download_article(id):
    url = pmc_url(id)
def get_response(
    link,
    retry_params=None,
    headers=None,
    timeout=None,
    proxies=None,
    session=None
):
    """
    get_response gets the response of a URL.

    :param link: link to the content to be received
    :type link: str
    :param retry_params: rules to retry
    :type retry_params: dict, optional
    :param headers: headers for the request
    :type headers: dict, optional
    :param timeout: timeout parameters for the request
    :type timeout: tuple, optional
    :param proxies: proxies
    :type proxies: dict, optional
    :param session: a session object to be used
    :type session: requests.Session, optional
    :return: response from the url
    :rtype: requests.models.Response
    """
    if retry_params is None:
        retry_params = {}

    retry_params = {
        **{
            'retries': 5,
            'backoff_factor': 0.3,
            'status_forcelist': (500, 502, 504)
        },
        **retry_params
    }

    if headers is None:
        headers = random_user_agent()
    if timeout is None:
        timeout = (5, 14)
    if session is None:
        session = requests.Session()
    if proxies is None:
        proxies = {}

    retry = Retry(
        total=retry_params.get('retries'),
        read=retry_params.get('retries'),
        connect=retry_params.get('retries'),
        backoff_factor=retry_params.get('backoff_factor'),
        status_forcelist=retry_params.get('status_forcelist'),
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    page = session.get(link, headers=headers, proxies=proxies, timeout=timeout)
    return page
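
# Usage sketch for get_response above (the URL is a placeholder): override
# two of the retry defaults and keep the built-in (5, 14) timeout.
resp = get_response(
    'https://example.com/page',
    retry_params={'retries': 3, 'backoff_factor': 1.0},
)
print(resp.status_code)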
==============================

This plugin adds details for articles referenced by a DOI
"""
from time import sleep
from xml.etree import ElementTree

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

from pelican import signals

http = requests.Session()
retries = Retry(total=5, backoff_factor=2, status_forcelist=[429, 500, 502, 503, 504])
http.mount("https://", HTTPAdapter(max_retries=retries))


def doi2json(doi):
    # curl -LH "Accept: application/vnd.citationstyles.csl+json" https://doi.org/10.1103/PhysRevE.96.062101
    url = 'https://doi.org/{}'.format(doi)
    r = http.get(url, headers={'Accept': 'application/vnd.citationstyles.csl+json'})
    try:
        json = r.json()
    except ValueError:
        print("error for doi", doi, ":", r)
        json = {}
    return json
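
# Usage sketch: fetch CSL-JSON metadata for the DOI from the comment above
# via content negotiation; 'title' is a standard CSL-JSON field.
meta = doi2json('10.1103/PhysRevE.96.062101')
print(meta.get('title'))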
def handle_api_response(
    url: str,
    auth: tuple = None,
    params: Dict[str, Any] = None,
    headers: Dict[str, Any] = None,
    timeout: tuple = (3.05, 60 * 30),
) -> requests.models.Response:
    """Handle and raise Python exceptions during requests, with a retry strategy for specific statuses.

    Args:
        url (str): the URL to connect to.
        auth (tuple, optional): authorization information. Defaults to None.
        params (Dict[str, Any], optional): the request params, which also include
            parameters such as the content type. Defaults to None.
        headers (Dict[str, Any], optional): the request headers required by the Supermetrics API.
        timeout (tuple, optional): the request timeout. Defaults to (3.05, 60 * 30).

    Raises:
        ReadTimeout: stop waiting for a response after the number of seconds given by the timeout parameter.
        HTTPError: raised when the HTTP status code is not 200.
        ConnectionError: raised when the client is unable to connect to the server.
        APIError: user-defined exception.

    Returns:
        response
    """
    try:
        session = requests.Session()
        retry_strategy = Retry(
            total=3,
            status_forcelist=[429, 500, 502, 503, 504],
            backoff_factor=1,
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        response = session.get(
            url,
            auth=auth,
            params=params,
            headers=headers,
            timeout=timeout,
        )
        response.raise_for_status()
    except ReadTimeout as e:
        msg = "The connection was successful, "
        msg += f"however the API call to {url} timed out after {timeout[1]}s "
        msg += "while waiting for the server to return data."
        raise APIError(msg) from e
    except HTTPError as e:
        raise APIError(
            f"The API call to {url} failed. "
            "Perhaps your account credentials need to be refreshed?",
        ) from e
    except (ConnectionError, Timeout) as e:
        raise APIError(f"The API call to {url} failed due to connection issues.") from e
    except ProtocolError as e:
        raise APIError(f"Did not receive any response for the API call to {url}.") from e
    except Exception as e:
        raise APIError("Unknown error.") from e

    return response
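
# Usage sketch (hypothetical endpoint; APIError is defined in the caller's
# codebase, as the docstring above notes):
try:
    resp = handle_api_response("https://api.example.com/v1/data", timeout=(3.05, 60))
    print(resp.json())
except APIError as e:
    print(f"request failed: {e}")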
def request(session, request_type, endpoint, body=None, uri_params=None):
    """ Does an HTTP request with retry to a Spotify endpoint.

    This method should return a tuple (response_json, status_code) if the
    request is executed, and raises an Exception if the request type is invalid.
    See https://developer.spotify.com/documentation/web-api/ for error codes

    Args:
        request_type: one of sp.REQUEST_GET, sp.REQUEST_POST, sp.REQUEST_PUT,
            sp.REQUEST_DELETE.
        endpoint: the Spotify uri to request
        body: (dict) the body to send as part of the request
        uri_params: (dict) the params to encode in the uri

    Returns:
        The response JSON and status code from Spotify. If the response
        contains invalid JSON or no content, response_json=None.

    Exceptions:
        Raises an HTTPError object in the event of an unsuccessful web request.
        All exceptions are as according to requests.Request.
    """
    request_uri = Endpoints.BASE_URI + endpoint
    headers = {
        'Authorization': 'Bearer ' + session.token(),
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }

    # total: max number of retries
    # backoff_factor: for exponential backoff. will wait 0.5, 1, 2, 4, 8, 16, 32 etc.
    # with total = 7 and backoff = 1, will wait 32 sec for the last retry, 64 sec total
    retry_strategy = Retry(total=7, backoff_factor=1)

    # Apply the retry strategy
    adapter = HTTPAdapter(max_retries=retry_strategy)
    http = requests.Session()
    http.mount('https://', adapter)
    http.mount('http://', adapter)

    while True:
        response = http.request(request_type,
                                request_uri,
                                json=body,
                                params=uri_params,
                                headers=headers,
                                timeout=session.timeout())
        status_code = response.status_code

        # 429: rate limiting applied; the Retry-After header is a string
        if status_code == 429:
            time.sleep(int(response.headers['Retry-After']))
        else:
            break

    # ValueError if no content; not an error
    try:
        content = response.json()
    except ValueError:
        content = None

    # Get the error message if it exists
    try:
        message = content['error']['message']
    except (KeyError, TypeError):
        message = str(content)

    # 400: bad request
    if status_code == 400:
        raise SpotifyError('%d, %s' % (status_code, message))

    # 401: unauthorized
    if status_code == 401:
        raise AuthenticationError('Unauthorized: %s' % message)

    # 500, 502, 503: internal spotify errors, shouldn't happen normally
    if status_code in [500, 502, 503]:
        raise SpotifyError('%d, %s' % (status_code, message))

    # Success codes, plus 403 (forbidden) and 404 (not found).
    # Our functions should case on 403/404 and deal with them accordingly.
    if status_code in [200, 201, 202, 204, 304, 403, 404]:
        return content, status_code

    # Request failed
    raise NetworkError('%d, %s' % (status_code, message))
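
# Worked example of the backoff arithmetic from the comments above. This is a
# sketch of urllib3's documented formula, backoff_factor * 2 ** (retry - 1);
# exact behavior varies by urllib3 version (older releases skip the sleep
# before the first retry), so treat the numbers as approximations.
backoff_factor = 1
total = 7
delays = [backoff_factor * 2 ** (n - 1) for n in range(1, total + 1)]
print(delays)       # [1, 2, 4, 8, 16, 32, 64]
print(sum(delays))  # 127 seconds of sleep across all retries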
def __init__(self, gpu, locale="en_us", test=False, headless=False): self.product_ids = set([]) self.cli_locale = locale.lower() self.locale = self.map_locales() self.session = requests.Session() self.gpu = gpu self.enabled = True self.auto_buy_enabled = False self.attempt = 0 self.started_at = datetime.now() self.test = test self.gpu_long_name = GPU_DISPLAY_NAMES[gpu] if path.exists(AUTOBUY_CONFIG_PATH): with open(AUTOBUY_CONFIG_PATH) as json_file: try: self.config = json.load(json_file) except Exception as e: log.error( "Your `autobuy_config.json` file is not valid json.") raise e if self.has_valid_creds(): self.nvidia_login = self.config["NVIDIA_LOGIN"] self.nvidia_password = self.config["NVIDIA_PASSWORD"] self.auto_buy_enabled = self.config["FULL_AUTOBUY"] self.cvv = self.config.get("CVV") self.interval = int(self.config.get("INTERVAL", 5)) else: raise InvalidAutoBuyConfigException(self.config) else: log.info("No Autobuy creds found.") # Disable auto_buy_enabled if the user does not provide a bool. if type(self.auto_buy_enabled) != bool: self.auto_buy_enabled = False adapter = TimeoutHTTPAdapter(max_retries=Retry( total=10, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504], method_whitelist=["HEAD", "GET", "OPTIONS"], )) self.session.mount("https://", adapter) self.session.mount("http://", adapter) self.notification_handler = NotificationHandler() log.info("Opening Webdriver") if headless: enable_headless() self.driver = webdriver.Chrome(executable_path=binary_path, options=options) self.sign_in() selenium_utils.add_cookies_to_session_from_driver( self.driver, self.session) log.info("Adding driver cookies to session") log.info("Getting product IDs") self.token_data = self.get_nvidia_access_token() self.payment_option = self.get_payment_options() if not self.payment_option.get("id") or not self.cvv: log.error( "No payment option on account or missing CVV. Disable Autobuy") self.auto_buy_enabled = False else: log.debug(self.payment_option) self.ext_ip = self.get_ext_ip() if not self.auto_buy_enabled: log.info("Closing webdriver") self.driver.close() self.get_product_ids() while len(self.product_ids) == 0: log.info( f"We have no product IDs for {self.gpu_long_name}, retrying until we get a product ID" ) self.get_product_ids() sleep(5)
timeString = "" hourString = "" minuteString = "" secondString = "" lastFeedHour = 0 # Sets to hourString if we have fed this hour. If Hour and lastFeedHour don't match, dispense food! lastPriceCheckHourNANO = 0 # Sets to hourString if we have queried the API for the latest Crypto Price lastPriceCheckHourBTC = 0 # tracks last time queried API for latest BTC Price, same as above for NANO ####################################### ##### Session Requests ################ sessionConnectionSnapyIO = requests.session( ) # Used by requests to keep the session rather than a new one every request sessionConnectionBlockIO = requests.session( ) # Used by requests for BlockIO API sessionConnectionCharmant = requests.session( ) # Used by requests for Charmant and current Market Prices retry = Retry(connect=3, backoff_factor=.5) # Incrementally sleeps until request works transportAdapterSnapyIO = HTTPAdapter(max_retries=retry) transportAdapterBlockIO = HTTPAdapter( max_retries=retry) # Setup Max Retries for Session Object transportAdapterCharmant = HTTPAdapter(max_retries=retry) sessionConnectionSnapyIO.mount(SnapyBalanceURL, transportAdapterSnapyIO) sessionConnectionBlockIO.mount( 'https://block.io/', transportAdapterBlockIO ) # Use Transport Adapter for all endpoints that start with this URL sessionConnectionCharmant.mount('https://pro-api.coinmarketcap.com', transportAdapterCharmant) ####################################### ####################################### #######################################
retrieve rewritten using inspiration from:
https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/
"""
import argparse
import sys

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

DEFAULT_TIMEOUT = 5  # seconds
URL = 'https://www.uniprot.org/'

retry_strategy = Retry(total=5,
                       backoff_factor=2,
                       status_forcelist=[429, 500, 502, 503, 504],
                       allowed_methods=["HEAD", "GET", "OPTIONS", "POST"])


class TimeoutHTTPAdapter(HTTPAdapter):
    def __init__(self, *args, **kwargs):
        self.timeout = DEFAULT_TIMEOUT
        if "timeout" in kwargs:
            self.timeout = kwargs["timeout"]
            del kwargs["timeout"]
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        timeout = kwargs.get("timeout")
        if timeout is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
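
# Usage sketch (an assumption, not part of the original excerpt): mount the
# timeout-aware adapter together with the retry strategy defined above.
http = requests.Session()
adapter = TimeoutHTTPAdapter(timeout=DEFAULT_TIMEOUT, max_retries=retry_strategy)
http.mount('https://', adapter)
resp = http.get(URL)  # uses DEFAULT_TIMEOUT unless a timeout kwarg is passed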
import urllib
import json

import web
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from webauthn2.util import negotiated_content_type

from .config import get_service_config

_config = get_service_config()

_session = requests.session()
_retries = Retry(
    connect=5,
    read=5,
    backoff_factor=1.0,
    status_forcelist=[500, 502, 503, 504]
)
_session.mount('http://', HTTPAdapter(max_retries=_retries))
_session.mount('https://', HTTPAdapter(max_retries=_retries))

if _config.credential_file is not None:
    with open(_config.credential_file, 'rb') as credential_file:
        _credentials = json.load(credential_file)
else:
    _credentials = {}


def target_server(target):
    if target.server_url.startswith('https://'):
        return target.server_url[len('https://'):]
def __init__(self, sku_id, notification_handler, headless=False):
    self.notification_handler = notification_handler
    self.sku_id = sku_id
    self.session = requests.Session()
    self.auto_buy = False
    self.account = {"username": "", "password": ""}

    adapter = HTTPAdapter(max_retries=Retry(
        total=3,
        backoff_factor=1,
        status_forcelist=[429, 500, 502, 503, 504],
        method_whitelist=["HEAD", "GET", "OPTIONS", "POST"],
    ))
    self.session.mount("https://", adapter)
    self.session.mount("http://", adapter)

    response = self.session.get(BEST_BUY_PDP_URL.format(sku=self.sku_id), headers=DEFAULT_HEADERS)
    log.info(f"PDP Request: {response.status_code}")
    self.product_url = response.url
    log.info(f"Product URL: {self.product_url}")

    response = self.session.get(self.product_url)
    log.info(f"Product URL Request: {response.status_code}")

    if self.auto_buy:
        log.info("Loading headless driver.")
        if headless:
            enable_headless()  # TODO - check if this still messes up the cookies.
        options.add_argument(
            "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
        )
        self.driver = webdriver.Chrome(
            executable_path=binary_path,
            options=options,
        )
        log.info("Loading https://www.bestbuy.com.")
        self.login()
        self.driver.get(self.product_url)
        # copy the browser's cookies into the requests session
        for cookie in self.driver.get_cookies():
            self.session.cookies.set_cookie(
                requests.cookies.create_cookie(
                    domain=cookie["domain"],
                    name=cookie["name"],
                    value=cookie["value"],
                ))
        # self.driver.quit()

    log.info("Calling location/v1/US/approximate")
    log.info(
        self.session.get(
            "https://www.bestbuy.com/location/v1/US/approximate",
            headers=DEFAULT_HEADERS,
        ).status_code)

    log.info("Calling basket/v1/basketCount")
    log.info(
        self.session.get(
            "https://www.bestbuy.com/basket/v1/basketCount",
            headers={
                "x-client-id": "browse",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36",
                "Accept": "application/json",
            },
        ).status_code)
def __init__(self, host, session: Session):
    self.session = session
    retries = Retry(total=5, backoff_factor=0.3)
    self.session.mount('http://', HTTPAdapter(max_retries=retries))
    self.host = f'{host}{PORT}'
    self._is_first = True
JUPYTER_ENDPOINT = getenv(
    "JUPYTER_ENDPOINT", "http://server.anonymous:80/notebook/anonymous/server")
URL_CONTENTS = f"{JUPYTER_ENDPOINT}/api/contents"

COOKIES = {"_xsrf": "token"}
HEADERS = {"content-type": "application/json", "X-XSRFToken": "token"}

SESSION = Session()
SESSION.cookies.update(COOKIES)
SESSION.headers.update(HEADERS)
SESSION.hooks = {
    "response": lambda r, *args, **kwargs: r.raise_for_status(),
}
RETRY_STRATEGY = Retry(
    total=5,
    backoff_factor=0.5,
    status_forcelist=[429, 500, 502, 503, 504],
    method_whitelist=["HEAD", "GET", "PUT", "OPTIONS", "DELETE"])
ADAPTER = HTTPAdapter(max_retries=RETRY_STRATEGY)
SESSION.mount("http://", ADAPTER)


def list_files(path):
    """Lists the files in the specified path.

    Args:
        path (str): path to a folder.

    Returns:
        list: A list of filenames.
    """
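    # A sketch of a possible body (an assumption; the excerpt ends at the
    # docstring). The Jupyter contents API returns a directory model whose
    # "content" field lists its entries.
    r = SESSION.get(f"{URL_CONTENTS}/{path}")
    return [item["name"] for item in r.json().get("content", [])]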
def requests_session():
    s = requests.Session()
    retry = Retry(MaxRetries, backoff_factor=RetryBackoffFactor)
    s.mount('http://', HTTPAdapter(max_retries=retry))
    s.mount('https://', HTTPAdapter(max_retries=retry))
    return s
stream.setFormatter(formatter)
logger.addHandler(stream)

# a separate logger with a non-default terminator
# to make one-line counts of tasks in BaseClass.build_q
logger1 = logging.getLogger("counter")
logger1.setLevel('DEBUG')
stream1 = logging.StreamHandler()
stream1.terminator = '\r'
stream1.setFormatter(formatter)
logger1.addHandler(stream1)

# requests session with 3 retries
session = requests.Session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)

desktop_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0']
import json
import time
import random

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

ENDPOINT = "https://catchvideo.net/getvideo"
HEADERS = {'X-Requested-With': 'XMLHttpRequest'}

session = requests.Session()
retry = Retry(
    total=3,
    read=3,
    connect=3,
    backoff_factor=0.5,
    status_forcelist=(500, 502, 503, 504)
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)

links = [
    # ("url", "filename.mp4"),
    # ("https://learning.oreilly.com/videos/python-digital-forensics/9781787126664/9781787126664-video1_3", "video1_3.mp4"),
]

with open('links.txt', 'w') as output_file:
    for url_data in links:
def _request(
    self,
    method: str,
    path: str,
    params: Dict[str, JSONLike] = None,
    server: str = None,
    headers: dict = None,
    token: str = None,
) -> "requests.models.Response":
    """
    Runs any specified request (GET, POST, DELETE) against the server

    Args:
        - method (str): The type of request to be made (GET, POST, DELETE)
        - path (str): Path of the API URL
        - params (dict, optional): Parameters used for the request
        - server (str, optional): The server to make requests against, base API
            server is used if not specified
        - headers (dict, optional): Headers to pass with the request
        - token (str): an auth token. If not supplied, the `client.access_token`
            is used.

    Returns:
        - requests.models.Response: The response returned from the request

    Raises:
        - ClientError: if the client token is not in the context (due to not
            being logged in)
        - ValueError: if a method is specified outside of the accepted GET,
            POST, DELETE
        - requests.HTTPError: if a status code is returned that is not `200` or `401`
    """
    if server is None:
        server = self.api_server
    assert isinstance(server, str)  # mypy assert

    if token is None:
        token = self.get_auth_token()

    url = urljoin(server, path.lstrip("/")).rstrip("/")

    params = params or {}
    headers = headers or {}
    if token:
        headers["Authorization"] = "Bearer {}".format(token)

    session = requests.Session()
    retries = Retry(
        total=6,
        backoff_factor=1,
        status_forcelist=[500, 502, 503, 504],
        method_whitelist=["DELETE", "GET", "POST"],
    )
    session.mount("https://", HTTPAdapter(max_retries=retries))

    if method == "GET":
        response = session.get(url, headers=headers, params=params)
    elif method == "POST":
        response = session.post(url, headers=headers, json=params)
    elif method == "DELETE":
        response = session.delete(url, headers=headers)
    else:
        raise ValueError("Invalid method: {}".format(method))

    # Check if request returned a successful status
    response.raise_for_status()

    return response
def __init__(self, info=None, request_charset='utf-8', response_charset=None):
    self._counter = 0
    self._cookies_filename = ''
    self._cookies = LWPCookieJar()
    self.url = None
    self.user_agent = USER_AGENT
    self.content = None
    self.status = None
    self.token = None
    self.passkey = None
    self.info = info
    self.proxy_url = None
    self.request_charset = request_charset
    self.response_charset = response_charset

    self.needs_proxylock = False

    self.headers = dict()
    self.request_headers = None

    self.session = requests.session()
    self.session.verify = False

    # Enable retrying of failed requests
    retries = Retry(
        total=2,
        read=2,
        connect=2,
        redirect=3,
        backoff_factor=0.1,
        status_forcelist=[429, 500, 502, 503, 504]
    )
    self.session.mount('http://', HTTPAdapter(max_retries=retries))
    self.session.mount('https://', HTTPAdapter(max_retries=retries))

    # self.session = cfscrape.create_scraper()
    # self.scraper = cfscrape.create_scraper()
    # self.session = self.scraper.session()

    global dns_public_list
    global dns_opennic_list
    dns_public_list = get_setting("public_dns_list", unicode).replace(" ", "").split(",")
    dns_opennic_list = get_setting("opennic_dns_list", unicode).replace(" ", "").split(",")
    # socket.setdefaulttimeout(60)

    # Parsing proxy information
    proxy = {
        'enabled': get_setting("proxy_enabled", bool),
        'use_type': get_setting("proxy_use_type", int),
        'type': proxy_types[0],
        'host': get_setting("proxy_host", unicode),
        'port': get_setting("proxy_port", int),
        'login': get_setting("proxy_login", unicode),
        'password': get_setting("proxy_password", unicode),
    }

    try:
        proxy['type'] = proxy_types[get_setting("proxy_type", int)]
    except Exception:
        pass

    if get_setting("use_public_dns", bool):
        connection.create_connection = patched_create_connection

    if get_setting("use_elementum_proxy", bool):
        elementum_addon = xbmcaddon.Addon(id='plugin.video.elementum')
        if elementum_addon and elementum_addon.getSetting('internal_proxy_enabled') == "true":
            self.proxy_url = "{0}://{1}:{2}".format("http", "127.0.0.1", "65222")
            if info and "internal_proxy_url" in info:
                self.proxy_url = info["internal_proxy_url"]

            self.session.proxies = {
                'http': self.proxy_url,
                'https': self.proxy_url,
            }
    elif proxy['enabled']:
        if proxy['use_type'] == 0 and info and "proxy_url" in info:
            log.debug("Setting proxy from Elementum: %s" % (info["proxy_url"]))
        elif proxy['use_type'] == 1:
            log.debug("Setting proxy with custom settings: %s" % (repr(proxy)))

            if proxy['login'] or proxy['password']:
                self.proxy_url = "{0}://{1}:{2}@{3}:{4}".format(proxy['type'], proxy['login'], proxy['password'], proxy['host'], proxy['port'])
            else:
                self.proxy_url = "{0}://{1}:{2}".format(proxy['type'], proxy['host'], proxy['port'])

        if self.proxy_url:
            self.session.proxies = {
                'http': self.proxy_url,
                'https': self.proxy_url,
            }
            proj.startswith('stackforge-attic/') or \
            proj == "openstack/openstack":
        return False
    else:
        return True


# Check if this project has a plugin file
def has_devstack_plugin(session, proj):
    # Don't link in the deb packaging repos
    if "openstack/deb-" in proj:
        return False
    r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj)
    return r.status_code == 200


logging.debug("Getting project list from %s" % url)
r = requests.get(url)
projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:])))
logging.debug("Found %d projects" % len(projects))

s = requests.Session()
# sometimes gitea gives us a 500 error; retry sanely
# https://stackoverflow.com/a/35636367
retries = Retry(total=3, backoff_factor=1, status_forcelist=[500])
s.mount('https://', HTTPAdapter(max_retries=retries))

found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)

for project in found_plugins:
    print(project)
    self.timeout = DEFAULT_TIMEOUT
    if "timeout" in kwargs:
        self.timeout = kwargs["timeout"]
        del kwargs["timeout"]
    super().__init__(*args, **kwargs)

def send(self, request, **kwargs):
    timeout = kwargs.get("timeout")
    if timeout is None:
        kwargs["timeout"] = self.timeout
    return super().send(request, **kwargs)


retry_strategy = Retry(
    total=3,
    status_forcelist=[403, 429, 500, 502, 503, 504],
    method_whitelist=["HEAD", "GET", "OPTIONS"],
    backoff_factor=1,
)
adapter = TimeoutHTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)


def to_buffer(buffer_or_path):
    """Get a buffer from a buffer or a path.

    Args:
        buffer_or_path (typing.StringIO or str):

    Returns:
_artifact_keys = [
    "format",
    "inline",
    "key",
    "src_path",
    "target_path",
    "viewer",
]


def bool2str(val):
    return "yes" if val else "no"


http_adapter = HTTPAdapter(max_retries=Retry(
    total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))


class HTTPRunDB(RunDBInterface):
    kind = "http"

    def __init__(self, base_url, user="", password="", token=""):
        self.base_url = base_url
        self.user = user
        self.password = password
        self.token = token
        self.server_version = ""
        self.session = None

    def __repr__(self):
        cls = self.__class__.__name__
def run_simulation(
    self: Task,
    girder_config: GirderConfig,
    simulation_config: SimulationConfig,
    name: str,
    target_time: float,
    job: Dict[str, Any],
    simulation_id: str,
    visualize_interval: float = 30,  # output every x 'minutes' TODO: integrate with viz platform
) -> Dict[str, Any]:
    """Run a simulation and export postprocessed vtk files to girder."""
    current_time = 0
    logger.info('initialize')
    with TemporaryDirectory() as run_dir, girder_config.client.session() as session:
        # configure retrying with exponential backoff
        retry = Retry(
            total=10,
            backoff_factor=0.1,  # 0.1, 0.2, 0.4, etc.
            status_forcelist=[413, 429, 500, 503],  # retry on girder's 500 error
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount(girder_config.client.urlBase, adapter)

        os.chdir(run_dir)
        try:
            simulation = girder_config.initialize(name, target_time, simulation_config,
                                                  job['_id'], simulation_id)
            try:
                girder_config.set_status(job['_id'], JobStatus.RUNNING, current_time, target_time)
            except Exception:
                logger.info('Setting status failed, the simulation was probably cancelled')
                return simulation

            time_step: int = 0
            previous_time: float = float('-inf')
            for state, status in run_iterator(simulation_config, target_time):
                if girder_config.is_cancelled(job['_id']):
                    logger.info('Cancelling job')
                    return simulation

                current_time = state.time
                if current_time >= visualize_interval + previous_time:
                    previous_time = current_time
                    logger.info(f'Simulation time {state.time}')

                    with TemporaryDirectory() as temp_dir:
                        temp_dir_path = Path(temp_dir)
                        generate_vtk(state, temp_dir_path)
                        stats = generate_summary_stats(state)
                        step_name = '%04i' % time_step if status != Status.finalize else 'final'
                        girder_config.upload(simulation['_id'], step_name, temp_dir_path,
                                             current_time, stats)

                    try:
                        girder_config.set_status(job['_id'], JobStatus.RUNNING, current_time,
                                                 target_time)
                    except Exception:
                        logger.info('Setting status failed, the simulation was probably cancelled')
                        return simulation

                    time_step += 1

            girder_config.finalize(simulation['_id'])
            girder_config.set_status(job['_id'], JobStatus.SUCCESS, target_time, target_time)
            return simulation
        except Exception:
            try:
                girder_config.set_status(job['_id'], JobStatus.ERROR, current_time, target_time)
            except Exception:
                logger.exception('Could not set girder error status')
            raise
        finally:
            os.chdir('/')
import requests
import time
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

s = requests.Session()
retries = Retry(total=500, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
# mount an adapter so the retry configuration actually applies to the session
s.mount('http://', HTTPAdapter(max_retries=retries))

times = []
for i in range(10000):
    start = time.time()
    r = s.get("http://localhost:2235/index")
    times.append(time.time() - start)
    print(str(r.status_code) + " " + str(r.content))

print(sum(times))
print(sum(times) / len(times))
""" import re import pandas as pd import requests from .hparams import Hparams import pandas as pd import math from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry import sys PROBASE_URL = "http://172.19.225.49:9068/probase/query-intent" CONNECTION_SIZE = 1000 session = requests.Session() # 会话保持,用一个会话调用接口 retry = Retry(connect=CONNECTION_SIZE, backoff_factor=0.5) adapter = HTTPAdapter(max_retries=retry, pool_maxsize=CONNECTION_SIZE, pool_connections=CONNECTION_SIZE) session.mount('http://', adapter) session.mount('https://', adapter) def get_shiti(key_word, session): # 调用接口获取扩展词类别 params = { "keyword": key_word, "openWhiteList": False, "platform": "pc", "requestType": "search", "size": 3, "vid": "rBIKGF6lR+5p7zgKD0K2Ag=5"
# Get items from an AcousticBrainz website
import os
import json

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

from train import util

ret = Retry(total=10, backoff_factor=0.2)
adaptor = HTTPAdapter(max_retries=ret)
session = requests.Session()
session.mount('http://', adaptor)
session.mount('https://', adaptor)

ACOUSTICBRAINZ_ROOT = "http://ac-acousticbrainz.s.upf.edu:8000"


def download_mbids(mbidlist):
    """Do a bulk query of MBIDs and return them.

    Returns a dict {mbid: data, mbid: data}
    If an mbid doesn't exist, it is not returned.
    """
    ret = {}
    url = os.path.join(ACOUSTICBRAINZ_ROOT, "api/v1/low-level")
    recids = ";".join(mbidlist)
    r = session.get(url, params={"recording_ids": recids})
    r.raise_for_status()
    for mbid, data in r.json().items():
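        # A possible completion (an assumption; the excerpt ends mid-loop).
        # Per the docstring, collect each returned document keyed by its MBID.
        ret[mbid] = data
    return ret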
def __init__(self, *args, **kwargs):
    self.timeout = DEFAULT_TIMEOUT
    if "timeout" in kwargs:
        self.timeout = kwargs["timeout"]
        del kwargs["timeout"]
    super().__init__(*args, **kwargs)

def send(self, request, **kwargs):
    timeout = kwargs.get("timeout")
    if timeout is None:
        kwargs["timeout"] = self.timeout
    return super().send(request, **kwargs)


retry_strategy = Retry(total=TOTAL_RETRIES,
                       backoff_factor=2,
                       status_forcelist=[429, 500, 502, 503, 504],
                       method_whitelist=["GET", "POST"])
adapter = TimeoutHTTPAdapter(max_retries=retry_strategy)


def expanded_raise_for_status(res):
    """
    Take a "requests" response object and expand the raise_for_status
    method to return more helpful errors

    :param res: the requests response object to check
    :return: None
    """
    try:
        res.raise_for_status()
    except HTTPError as e:
        try:
            raise HTTPError('{}\nReason: {}'.format(str(e), res.json()['message']))