def __init__(self, configuration=None):
    """Build the client's urllib3 connection pool from *configuration*.

    Args:
        configuration: optional ``con.Configuration``; a fresh default
            instance is created when omitted.

    Raises:
        exception.InvalidConfigException: if *configuration* is not a
            ``con.Configuration`` instance.
    """
    if configuration is None:
        configuration = con.Configuration()
    if not isinstance(configuration, con.Configuration):
        raise exception.InvalidConfigException(
            "configuration object is not an instance of Configuration.")

    cert_reqs = ssl.CERT_REQUIRED if configuration.verify_ssl else ssl.CERT_NONE
    if cert_reqs == ssl.CERT_NONE:
        # Verification disabled: no CA bundle, and silence the
        # InsecureRequestWarning urllib3 would otherwise emit per request.
        self.pool_manager = urllib3.PoolManager(
            num_pools=configuration.pool_size,
            cert_reqs=cert_reqs,
            retries=retry.Retry(connect=configuration.retries),
            timeout=configuration.timeout)
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    else:
        # Without an explicit certificate file, fall back to Mozilla's
        # root certificates shipped with certifi.
        ca_certs = configuration.ca_certs or certifi.where()
        self.pool_manager = urllib3.PoolManager(
            num_pools=configuration.pool_size,
            cert_reqs=cert_reqs,
            ca_certs=ca_certs,
            retries=retry.Retry(connect=configuration.retries),
            timeout=configuration.timeout)
def test_https_pool_key_fields(self):
    """Assert the HTTPSPoolKey fields are honored when selecting a pool."""
    overrides = {
        "timeout": timeout.Timeout(3.14),
        "retries": retry.Retry(total=6, connect=2),
        "block": True,
        "source_address": "127.0.0.1",
        "key_file": "/root/totally_legit.key",
        "cert_file": "/root/totally_legit.crt",
        "cert_reqs": "CERT_REQUIRED",
        "ca_certs": "/root/path_to_pem",
        "ssl_version": "SSLv23_METHOD",
    }
    if sys.version_info >= (3, 7):
        overrides["blocksize"] = 16384 + 1

    manager = PoolManager()
    distinct_pools = [
        manager.connection_from_url("https://example.com/"),
        manager.connection_from_url("https://example.com:4333/"),
        manager.connection_from_url("https://other.example.com/"),
    ]

    # Asking for a connection pool with the same key should give us an
    # existing pool.
    duplicate_pools = []
    for field, value in overrides.items():
        manager.connection_pool_kw[field] = value
        distinct_pools.append(manager.connection_from_url("https://example.com/"))
        duplicate_pools.append(manager.connection_from_url("https://example.com/"))

    # Every pool obtained under a *different* key must be a distinct object.
    for i, left in enumerate(distinct_pools):
        for j, right in enumerate(distinct_pools):
            if i != j:
                assert left is not right
    assert all(dup in distinct_pools for dup in duplicate_pools)
    assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
def test_http_pool_key_fields(self):
    """Assert the HTTPPoolKey fields are honored when selecting a pool."""
    overrides = {
        'timeout': timeout.Timeout(3.14),
        'retries': retry.Retry(total=6, connect=2),
        'block': True,
        'strict': True,
        'source_address': '127.0.0.1',
    }
    manager = PoolManager()
    pools = [
        manager.connection_from_url('http://example.com/'),
        manager.connection_from_url('http://example.com:8000/'),
        manager.connection_from_url('http://other.example.com/'),
    ]
    # Each override changes the pool key, so a fresh pool must be
    # created after every assignment.
    for field, value in overrides.items():
        manager.connection_pool_kw[field] = value
        pools.append(manager.connection_from_url('http://example.com/'))

    for i, left in enumerate(pools):
        for j, right in enumerate(pools):
            if i != j:
                assert left is not right
    assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
def test_http_pool_key_fields(self):
    """Assert the HTTPPoolKey fields are honored when selecting a pool."""
    overrides = {
        "timeout": timeout.Timeout(3.14),
        "retries": retry.Retry(total=6, connect=2),
        "block": True,
        "source_address": "127.0.0.1",
    }
    if sys.version_info >= (3, 7):
        overrides["blocksize"] = 16384 + 1

    manager = PoolManager()
    pools = [
        manager.connection_from_url("http://example.com/"),
        manager.connection_from_url("http://example.com:8000/"),
        manager.connection_from_url("http://other.example.com/"),
    ]
    # Each override changes the pool key, so a fresh pool must be
    # created after every assignment.
    for field, value in overrides.items():
        manager.connection_pool_kw[field] = value
        pools.append(manager.connection_from_url("http://example.com/"))

    for i, left in enumerate(pools):
        for j, right in enumerate(pools):
            if i != j:
                assert left is not right
    assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
def test_https_pool_key_fields(self):
    """Assert the HTTPSPoolKey fields are honored when selecting a pool."""
    overrides = {
        'timeout': timeout.Timeout(3.14),
        'retries': retry.Retry(total=6, connect=2),
        'block': True,
        'strict': True,
        'source_address': '127.0.0.1',
        'key_file': '/root/totally_legit.key',
        'cert_file': '/root/totally_legit.crt',
        'cert_reqs': 'CERT_REQUIRED',
        'ca_certs': '/root/path_to_pem',
        'ssl_version': 'SSLv23_METHOD',
    }
    manager = PoolManager()
    distinct_pools = [
        manager.connection_from_url('https://example.com/'),
        manager.connection_from_url('https://example.com:4333/'),
        manager.connection_from_url('https://other.example.com/'),
    ]

    # Asking for a connection pool with the same key should give us an
    # existing pool.
    duplicate_pools = []
    for field, value in overrides.items():
        manager.connection_pool_kw[field] = value
        distinct_pools.append(manager.connection_from_url('https://example.com/'))
        duplicate_pools.append(manager.connection_from_url('https://example.com/'))

    for i, left in enumerate(distinct_pools):
        for j, right in enumerate(distinct_pools):
            if i != j:
                assert left is not right
    assert all(dup in distinct_pools for dup in duplicate_pools)
    assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
def test_https_pool_key_fields(self):
    """Assert the HTTPSPoolKey fields are honored when selecting a pool."""
    overrides = [
        ('timeout', timeout.Timeout(3.14)),
        ('retries', retry.Retry(total=6, connect=2)),
        ('block', True),
        ('source_address', '127.0.0.1'),
        ('key_file', DEFAULT_CERTS['keyfile']),
        ('cert_file', DEFAULT_CERTS['certfile']),
        ('cert_reqs', 'CERT_REQUIRED'),
        ('ca_certs', DEFAULT_CA),
        ('ca_cert_dir', DEFAULT_CA_DIR),
        ('ssl_version', 'SSLv23'),
        ('ssl_context', ssl_.create_urllib3_context()),
    ]
    manager = PoolManager()
    distinct_pools = [
        manager.connection_from_url('https://example.com/'),
        manager.connection_from_url('https://example.com:4333/'),
        manager.connection_from_url('https://other.example.com/'),
    ]

    # Asking for a connection pool with the same key should give us an
    # existing pool.
    duplicate_pools = []
    for field, value in overrides:
        manager.connection_pool_kw[field] = value
        distinct_pools.append(manager.connection_from_url('https://example.com/'))
        duplicate_pools.append(manager.connection_from_url('https://example.com/'))

    for i, left in enumerate(distinct_pools):
        for j, right in enumerate(distinct_pools):
            if i != j:
                assert left is not right
    assert all(dup in distinct_pools for dup in duplicate_pools)
    assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
def query_api(full_url):
    """GET *full_url* using a session that retries failed connections.

    A single ``HTTPAdapter`` with five connection retries is mounted for
    both ``http://`` and ``https://`` URLs.

    Args:
        full_url: the complete URL to request.

    Returns:
        The ``requests.Response`` for the GET request.
    """
    # Use the session as a context manager so its connection pool is
    # closed when the request completes (the original leaked it), and
    # share one adapter between both schemes instead of building two.
    with requests.Session() as session:
        adapter = adapters.HTTPAdapter(max_retries=retry.Retry(connect=5))
        for scheme in ('http://', 'https://'):
            session.mount(scheme, adapter)
        return session.get(full_url)
def session(self):
    """Return a ``requests.Session`` that retries transient failures.

    The session retries up to three times (total/read/connect) with
    exponential backoff on HTTP 500, 502 and 504 responses, for both
    ``http://`` and ``https://`` URLs.
    """
    retry_policy = retry.Retry(
        total=3,
        read=3,
        connect=3,
        backoff_factor=0.3,
        status_forcelist=[500, 502, 504],
    )
    http_adapter = requests.adapters.HTTPAdapter(max_retries=retry_policy)
    retrying_session = requests.Session()
    for prefix in ('http://', 'https://'):
        retrying_session.mount(prefix, adapter=http_adapter)
    return retrying_session
def __enter__(self):
    """Download ``self.url``, verify its checksum, and open it for reading.

    The response body is fetched with a retrying session, held in an
    in-memory ``BytesIO`` buffer, and checked against
    ``self.checksum_sha256`` before being handed back.

    Returns:
        The entered ``BytesIO`` handle containing the downloaded bytes.

    Raises:
        requests.HTTPError: if the download does not return a success
            status.
    """
    retry_policy = retry.Retry(connect=10, read=10, status=10,
                               backoff_factor=0.1,
                               status_forcelist=RETRY_STATUS_CODES)
    # Close the session (and its pooled connections) once the body has
    # been fully read into memory; the original version leaked it.
    with requests.Session() as session:
        adapter = HTTPAdapter(max_retries=retry_policy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        resp = session.get(self.url)
        resp.raise_for_status()
        content = resp.content
    self.handle = BytesIO(content)
    verify_checksum(self.handle, self.checksum_sha256)
    return self.handle.__enter__()
def create_session_with_retry(retry_policy=None) -> Session:
    """Return a ``Session`` with *retry_policy* mounted on both schemes.

    When no policy is supplied, a default is built that retries HTTP 409
    responses with exponential backoff, for every common HTTP verb.
    """
    retry_policy = retry_policy or retry.Retry(
        # `total` appears to default to 10; raise it far above so the
        # `status` retry count below is the effective limit.
        total=50,
        # Number of retries taken when the response status is in
        # status_forcelist.
        status=10,
        read=10,
        status_forcelist=[409],
        backoff_factor=0.6,
        # NOTE(review): `method_whitelist` is deprecated in urllib3 >=
        # 1.26 (renamed `allowed_methods`) — confirm the pinned urllib3
        # version before renaming it here.
        method_whitelist=frozenset(
            ['HEAD', 'GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']))
    http_session = Session()
    http_adapter = HTTPAdapter(max_retries=retry_policy)
    for prefix in ('http://', 'https://'):
        http_session.mount(prefix, http_adapter)
    return http_session
def _requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None,
) -> requests.Session:
    """Return *session* (or a fresh one) with retrying adapters mounted.

    Retries *retries* times on total/read/connect failures and on the
    statuses in *status_forcelist*, sleeping with exponential backoff.
    """
    session = session or requests.Session()
    adapter = adapters.HTTPAdapter(max_retries=retry.Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    ))
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    return session
def __init__(self, url_root, username, password, should_retry=False):
    """Initialize the handler with the necessary authentication credentials."""
    self.auth_handler = requests.auth.HTTPBasicAuth(username, password)
    self.session = requests.Session()
    if should_retry:
        retry_policy = urllib3_retry.Retry(
            backoff_factor=0.1,       # Enable backoff starting at 0.1s.
            method_whitelist=False,   # Support all HTTP verbs.
            status_forcelist=[500, 502, 503, 504])  # Retry these statuses.
        adapter = requests.adapters.HTTPAdapter(max_retries=retry_policy)
        for prefix in ('http://', 'https://'):
            self.session.mount(prefix, adapter)
    self.url_root = url_root
def __init__(self, url=None):
    """Create a DSP API client with token management and a retrying session.

    Args:
        url: optional API base URL; falls back to ``config.DSP_API_URL``.
    """
    self.logger = logging.getLogger(__name__)
    self.url = url or config.DSP_API_URL
    self.logger.info(f'Using {self.url}')
    self.aap_api_domain = config.AAP_API_DOMAIN
    self.token_client = AAPTokenClient(url=config.AAP_API_URL)
    self.token_manager = TokenManager(token_client=self.token_client)
    policy = retry.Retry(
        # `total` appears to default to 10; raised far above so the
        # `status` retry count below is the effective limit.
        total=100,
        # Retries taken when the status is in status_forcelist; 17
        # retries with this backoff span roughly 20 minutes.
        status=17,
        read=10,
        status_forcelist=[500, 502, 503, 504],
        backoff_factor=0.6)
    self.session = requests.Session()
    self.session.mount('https://',
                       requests.adapters.HTTPAdapter(max_retries=policy))
def get_session(token, dry_run=False, pool_maxsize=100):
    """Return a token-authenticated session (an autospec mock when *dry_run*).

    The session retries on statuses commonly caused by stress testing,
    for any HTTP verb, and optionally logs response times at DEBUG level.
    """
    if dry_run:
        return mock.create_autospec(requests.Session)

    session = requests.Session()
    session.headers.update({'Authorization': 'token %s' % token})
    # Retry on errors that might be caused by stress testing.
    retry_policy = retry.Retry(
        backoff_factor=0.5,
        method_whitelist=False,  # retry on any verb (including POST)
        status_forcelist={
            429,  # concurrent_spawn_limit returns a 429
            503,  # if the hub container crashes we get a 503
            504,  # if the cloudflare gateway times out we get a 504
        })
    http_adapter = adapters.HTTPAdapter(max_retries=retry_policy,
                                        pool_maxsize=pool_maxsize)
    for prefix in ("http://", "https://"):
        session.mount(prefix, http_adapter)
    if LOG.isEnabledFor(logging.DEBUG):
        session.hooks['response'].append(log_response_time)
    return session
def test_https_pool_key_fields(self):
    """Assert the HTTPSPoolKey fields are honored when selecting a pool."""
    overrides = [
        ("timeout", timeout.Timeout(3.14)),
        ("retries", retry.Retry(total=6, connect=2)),
        ("block", True),
        ("source_address", "127.0.0.1"),
        ("key_file", DEFAULT_CERTS["keyfile"]),
        ("cert_file", DEFAULT_CERTS["certfile"]),
        ("cert_reqs", "CERT_REQUIRED"),
        ("ca_certs", DEFAULT_CA),
        ("ca_cert_dir", DEFAULT_CA_DIR),
        ("ssl_version", "SSLv23"),
        ("ssl_context", ssl_.create_urllib3_context()),
    ]
    manager = PoolManager()
    distinct_pools = [
        manager.connection_from_url("https://example.com/"),
        manager.connection_from_url("https://example.com:4333/"),
        manager.connection_from_url("https://other.example.com/"),
    ]

    # Asking for a connection pool with the same key should give us an
    # existing pool.
    duplicate_pools = []
    for field, value in overrides:
        manager.connection_pool_kw[field] = value
        distinct_pools.append(manager.connection_from_url("https://example.com/"))
        duplicate_pools.append(manager.connection_from_url("https://example.com/"))

    for i, left in enumerate(distinct_pools):
        for j, right in enumerate(distinct_pools):
            if i != j:
                assert left is not right
    assert all(dup in distinct_pools for dup in duplicate_pools)
    assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
def __init__(self, url=None):
    """Create an Ingest API client with entity caching and a retrying session.

    Args:
        url: optional API base URL; falls back to ``config.INGEST_API_URL``.
            Any trailing slash is stripped.
    """
    self.logger = logging.getLogger(__name__)
    self.headers = {
        'Content-type': 'application/json',
    }
    base_url = url if url else config.INGEST_API_URL
    self.url = base_url.rstrip('/')
    self.logger.info(f'Using {self.url}')
    self.entity_cache = {}
    self.cache_enabled = True
    policy = retry.Retry(
        # `total` appears to default to 10; raised far above so the
        # `status` retry count below is the effective limit.
        total=100,
        # Retries taken when the status is in status_forcelist; 17
        # retries with this backoff span roughly 20 minutes.
        status=17,
        read=10,
        status_forcelist=[500, 502, 503, 504],
        backoff_factor=0.6)
    self.session = requests.Session()
    self.session.mount('https://',
                       requests.adapters.HTTPAdapter(max_retries=policy))
from urllib3.util import retry import sentry_sdk from sentry_sdk.integrations import logging as sentry_logging from bob_emploi.common.python import checker from bob_emploi.common.python import mustache from bob_emploi.common.python.i18n import translation from bob_emploi.frontend.server.asynchronous.i18n import extract_mailjet_strings from bob_emploi.frontend.server.mail.templates import mailjet_templates # ID of the Airtable base containing translations. _I18N_BASE_ID = 'appkEc8N0Bw4Uok43' _HTTP_ADAPTER = adapters.HTTPAdapter(max_retries=retry.Retry( total=10, status_forcelist=[429, 503], allowed_methods=['POST', 'GET', 'OPTIONS', 'PUT'], backoff_factor=2, )) def translate_html_tags(html_soup: str, translate: Callable[[str], str], with_outer_tag: bool = True) -> str: """Translate an HTML soup. e.g. '<p style="foo"><a>Hello</a><br />World</p>', and '<1>Hello</1><2/>World' => '<1>Bonjour</1><2/>monde' => ''<p style="foo"><a>Bonjour</a><br />monde</p>' """ if with_outer_tag:
'x/tap-as-a-service-tempest-plugin' 'x/trio2o' ] url = 'https://review.opendev.org/projects/' # This is what a project looks like ''' "openstack-attic/akanda": { "id": "openstack-attic%2Fakanda", "state": "READ_ONLY" }, ''' http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') retries = retry.Retry(status_forcelist=[500], backoff_factor=1.0) def has_tempest_plugin(proj): try: r = http.request('GET', "https://opendev.org/%s/raw/branch/" "master/setup.cfg" % proj, retries=retries) if r.status == 404: return False except urllib3.exceptions.MaxRetryError as err: # We should not ignore non 404 errors. raise err p = re.compile(r'^tempest\.test_plugins', re.M) if p.findall(r.data.decode('utf-8')):