def test_override_timeout(self):
    """override_timeout temporarily overrides the default timeout."""
    self.addCleanup(set_default_timeout_function, None)
    with override_timeout(1.0):
        self.assertEqual(1.0, get_default_timeout_function()())
    set_default_timeout_function(lambda: 5.0)
    with override_timeout(1.0):
        self.assertEqual(1.0, get_default_timeout_function()())
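# Consistent with the behaviour exercised by this test, override_timeout can
# be thought of as a context manager that installs a temporary default
# timeout function and restores the previous one on exit.  The following is a
# minimal sketch under that assumption, for illustration only; it is not
# necessarily the real lp.services.timeout implementation.
from contextlib import contextmanager

@contextmanager
def override_timeout(timeout):
    # Remember whichever timeout function is currently installed.
    original = get_default_timeout_function()
    set_default_timeout_function(lambda: timeout)
    try:
        yield
    finally:
        # Restore the previous timeout function, even if the body raised.
        set_default_timeout_function(original)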
def getExternalBugTrackerToUse(self):
    """See `IExternalBugTracker`."""
    base_auth_url = urlappend(self.baseurl, 'launchpad-auth')
    # Any token will do.
    auth_url = urlappend(base_auth_url, 'check')
    try:
        with override_timeout(config.checkwatches.default_socket_timeout):
            response = urlfetch(auth_url, use_proxy=True)
    except requests.HTTPError as e:
        # If the error is HTTP 401 Unauthorized then we're
        # probably talking to the LP plugin.
        if e.response.status_code == 401:
            return TracLPPlugin(self.baseurl)
        else:
            return self
    except requests.RequestException:
        return self
    else:
        # If the response contains a trac_auth cookie then we're
        # talking to the LP plugin.  However, it's unlikely that
        # the remote system will authorize the bogus auth token we
        # sent, so this check is really intended to detect broken
        # Trac instances that return HTTP 200 for a missing page.
        for cookie in response.cookies:
            if cookie.name == 'trac_auth':
                return TracLPPlugin(self.baseurl)
        else:
            return self
def makeRequest(self, method, url, **kwargs):
    """Make a request.

    :param method: The HTTP request method.
    :param url: The URL to request.
    :return: A `requests.Response` object.
    :raises requests.RequestException: if the request fails.
    """
    with override_timeout(self.timeout):
        return urlfetch(url, method=method, use_proxy=True, **kwargs)
def _update(self, host, timeout, auth_header=None):
    headers = {
        "User-Agent": LP_USER_AGENT,
        "Host": host,
        "Accept": "application/vnd.github.v3+json",
        }
    if auth_header is not None:
        headers["Authorization"] = auth_header
    url = "https://%s/rate_limit" % host
    try:
        with override_timeout(timeout):
            response = urlfetch(url, headers=headers, use_proxy=True)
            return response.json()["resources"]["core"]
    except requests.RequestException as e:
        raise BugTrackerConnectError(url, e)
def fetchCVEURL(self, url):
    """Fetch CVE data from a URL, decompressing if necessary."""
    self.logger.info("Downloading CVE database from %s..." % url)
    try:
        with override_timeout(config.cveupdater.timeout):
            # Command-line options are trusted, so allow file://
            # URLs to ease testing.
            response = urlfetch(url, use_proxy=True, allow_file=True)
    except requests.RequestException:
        raise LaunchpadScriptFailure(
            'Unable to connect for CVE database %s' % url)
    cve_db = response.content
    self.logger.info("%d bytes downloaded." % len(cve_db))
    # requests will normally decompress this automatically, but that
    # might not be the case if we're given a file:// URL to a gzipped
    # file.
    if cve_db[:2] == b'\037\213':  # gzip magic
        cve_db = gzip.GzipFile(fileobj=io.BytesIO(cve_db)).read()
    return cve_db
def request(self, host, handler, request_body, verbose=0):
    """Make an XMLRPC request.

    Uses the configured proxy server to make the connection.
    """
    url = urlunparse((self.scheme, host, handler, '', '', ''))
    # httplib can raise a UnicodeDecodeError when using a Unicode
    # URL, a non-ASCII body and a proxy.  http://bugs.python.org/issue12398
    url = six.ensure_binary(url)
    try:
        with override_timeout(self.timeout):
            response = urlfetch(
                url, method='POST', headers={'Content-Type': 'text/xml'},
                data=request_body, cookies=self.cookie_jar,
                hooks={'response': repost_on_redirect_hook},
                use_proxy=True)
    except requests.HTTPError as e:
        raise ProtocolError(
            url.decode('utf-8'), e.response.status_code,
            e.response.reason, e.response.headers)
    else:
        traceback_info(response.text)
        return self.parse_response(BytesIO(response.content))
def getPage(self):
    """Download and return content from the Bugzilla page."""
    with override_timeout(config.updatebugzillaremotecomponents.timeout):
        return urlfetch(self.url, use_proxy=True).content
def _getPage(self, page):
    """GET the specified page on the remote HTTP server."""
    page_url = urlappend(self.sourceforge_baseurl, page)
    with override_timeout(config.updatesourceforgeremoteproduct.timeout):
        return urlfetch(page_url, use_proxy=True).content