def obtain_auth_token(self):
    """Return a valid Microsoft Cognitive Services authentication token,
    obtained with the current subscription key if necessary.
    """
    start_time = time.time()

    if self.auth_token is None or self.token_expiry < start_time:
        headers = Headers()
        headers.addRawHeader('Ocp-Apim-Subscription-Key',
                             self.subscription_key)
        headers.addRawHeader('Content-Length', '0')

        response = yield self.agent.request('POST', AUTH_URL,
                                            headers=headers)

        if response.code != 200:
            data = yield readBody(response)
            self.log.error(
                'Could not authenticate to Microsoft Cognitive '
                'Services: {data}',
                data=data)
            raise UserVisibleError(
                'Could not authenticate to Microsoft Cognitive '
                'Services. Try again later.')

        # Coerce the access token to a native string to avoid problems
        # inside Twisted's header handling code down the line.
        self.auth_token = (
            (yield readBody(response)).strip().decode('ascii'))
        self.token_expiry = start_time + AUTH_TOKEN_TTL

    returnValue(self.auth_token)
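# obtain_auth_token() above yields Deferreds and finishes with
# returnValue(), so in the original it is presumably decorated with
# @defer.inlineCallbacks. A minimal caller sketch under that assumption;
# _authorized_headers() is a hypothetical method name, not from the source:

from twisted.internet import defer
from twisted.web.http_headers import Headers


@defer.inlineCallbacks
def _authorized_headers(self):
    # Reuses the cached token until it expires, re-authenticating on demand.
    token = yield self.obtain_auth_token()
    headers = Headers()
    headers.addRawHeader('Authorization', 'Bearer %s' % token)
    defer.returnValue(headers)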
def test_untrusted_Origins_are_not_allowed_with_OriginCheck_Trusted(agent):
    # When using WebSocketOriginCheck Trusted, even a same-origin request
    # isn't good enough if the origin is not on the whitelist.
    response = yield make_request(agent, path='/origin-whitelist',
                                  origin=make_root())
    assert response.code == 403

    client.readBody(response).cancel()  # immediately close the connection
def _createCommitProvisionCB(self, agent, conn_url, header):
    c_resp = yield agent.request('GET', conn_url, header)
    body = yield readBody(c_resp)
    c_info = json.loads(body)
    self.failUnlessEquals(c_info['reservation_state'], 'ReserveHeld',
                          'State did not transition to held after creation')

    status_url = conn_url + '/status'

    # commit
    producer2 = FileBodyProducer(StringIO('commit'))
    resp2 = yield agent.request('POST', status_url, header, producer2)
    self.failUnlessEqual(resp2.code, 200,
                         'Service did not return OK after commit')

    # fetch the connection state again after the commit
    c_resp = yield agent.request('GET', conn_url, header)
    body = yield readBody(c_resp)
    c_info2 = json.loads(body)
    self.failUnlessEquals(c_info2['reservation_state'], 'ReserveStart',
                          'State did not transition after commit')

    # provision
    producer3 = FileBodyProducer(StringIO('provision'))
    resp3 = yield agent.request('POST', status_url, header, producer3)
    self.failUnlessEqual(resp3.code, 200,
                         'Service did not return OK after provision')

    # give the provider a bit of time to switch
    yield task.deferLater(reactor, 0.1, self._createCommitProvisionCB2,
                          agent, conn_url, header)
def invalid_version_response(agent, request):
    """
    A fixture that performs a bad handshake with a prohibited WebSocket
    version.
    """
    response = pytest.blockon(make_request(agent, version=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def bad_protocol_response(agent, request):
    """
    A fixture that performs a bad handshake with an invalid
    Sec-WebSocket-Protocol header.
    """
    response = pytest.blockon(make_request(agent, protocol=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def trusted_origin_response(agent, request):
    """
    A fixture that performs a handshake using one of the explicitly
    trusted test Origins.
    """
    response = pytest.blockon(make_request(agent, path='/origin-whitelist',
                                           origin=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def increment_response(agent, request):
    """
    A fixture that connects to the dumb-increment plugin with the given
    subprotocol list.
    """
    response = pytest.blockon(make_request(agent, path='/dumb-increment',
                                           protocol=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def test_cpu_load_does_not_spike_when_idle(agent):
    """
    A regression test for issue #9 (railed CPU when a WebSocket connection
    is open but idle).
    """
    response = yield make_request(agent)

    try:
        # Now that the connection is open, see if any CPUs are in trouble.
        assert not any_cpus_railed()
    finally:
        client.readBody(response).cancel()  # close the connection
def good_origin_response(agent, request):
    """
    A fixture that performs a handshake with an Origin that matches the
    server.
    """
    host = make_authority(host=request.param[0])
    origin = make_root(host=request.param[0])
    version = request.param[1]

    response = pytest.blockon(make_request(agent, origin=origin, host=host,
                                           version=version))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def bad_origin_response(agent, request):
    """
    A fixture that performs a good handshake, but with an Origin that does
    not match the server.
    """
    origin = request.param[0]
    host = request.param[1]
    version = request.param[2]

    response = pytest.blockon(make_request(agent, origin=origin, host=host,
                                           version=version))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def _decode_response(self, response):
    def decode(text):
        return json.loads(text)

    d = readBody(response)
    d.addCallback(decode)
    d.addCallback(self._construct_response_object, response.headers)
    return d
def post_urlencoded_get_raw(self, destination, path, accept_partial=False,
                            args={}):
    query_bytes = urllib.urlencode(args, True)

    def body_callback(method, url_bytes, headers_dict):
        return FileBodyProducer(StringIO(query_bytes))

    response = yield self._create_request(
        destination.encode("ascii"),
        "POST",
        path.encode("ascii"),
        body_callback=body_callback,
        headers_dict={
            "Content-Type": ["application/x-www-form-urlencoded"]
        }
    )

    try:
        body = yield readBody(response)
        defer.returnValue(body)
    except PartialDownloadError as e:
        if accept_partial:
            defer.returnValue(e.response)
        else:
            raise e
def asyncPost():
    agent = Agent(reactor)
    headers = {
        'User-Agent': [self.userAgent],
        'PRIVATE-TOKEN': [self._private_token],
    }

    class StringProducer(object):
        implements(IBodyProducer)

        def __init__(self):
            self.length = len(http_body)

        def startProducing(self, consumer):
            consumer.write(http_body)
            return defer.succeed(None)

        def stopProducing(self):
            pass

        def pauseProducing(self):
            pass

        def resumeProducing(self):
            pass

    response = yield agent.request(
        method, url, headers=Headers(headers),
        bodyProducer=StringProducer() if http_body else None)

    resp_headers = {}
    for k in response.headers._rawHeaders:
        resp_headers[k] = response.headers._rawHeaders[k][0]

    isValid = self._parse_headers(resp_headers)
    if isValid:
        body = yield readBody(response)
        defer.returnValue(json.loads(body))

    defer.returnValue(None)
def POST_request(self, url, headers=None, body=None):
    scheme = urlparse(url).scheme
    agent = self.agent(scheme)
    if headers is None:
        headers = self.default_headers()
    if body is None:
        body = ""

    semaphore = getDeferredSemaphore(self.auth_url, MAX_PARALLEL)
    log.debug("POST %s", url)
    try:
        response = yield semaphore.run(
            add_timeout,
            agent.request(
                'POST', url,
                headers=headers,
                bodyProducer=StringProducer(body)
            ),
            READ_TIMEOUT
        )
    except TimeoutError:
        raise TimeoutError("POST %s" % url)

    body = yield readBody(response)
    log.debug("POST %s => %s", url, body)

    # If the request resulted in an error, raise an exception
    self.handle_error_response(response, body)

    returnValue((body, response.headers))
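# The snippet above relies on a StringProducer helper that is not shown.
# A minimal sketch of such an IBodyProducer, assuming it simply wraps an
# in-memory payload (the class name comes from the snippet; the body is
# an assumption, not the original implementation):

from zope.interface import implementer

from twisted.internet import defer
from twisted.web.iweb import IBodyProducer


@implementer(IBodyProducer)
class StringProducer(object):
    """Feed a fixed in-memory payload to an Agent request."""

    def __init__(self, body):
        self.body = body
        self.length = len(body)

    def startProducing(self, consumer):
        # Write everything at once; the payload is already in memory.
        consumer.write(self.body)
        return defer.succeed(None)

    def pauseProducing(self):
        pass

    def resumeProducing(self):
        pass

    def stopProducing(self):
        pass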
def get_onionoo_details(self, agent):
    """
    Requests the 'details' document from onionoo.torproject.org via the
    given `twisted.web.iweb.IAgent` -- you can get a suitable instance to
    pass here by calling either :meth:`txtorcon.Tor.web_agent` or
    :meth:`txtorcon.Circuit.web_agent`.
    """
    uri = 'https://onionoo.torproject.org/details?lookup={}'.format(
        self.id_hex[1:]).encode('ascii')
    resp = yield agent.request(b'GET', uri)
    if resp.code != 200:
        raise RuntimeError(
            'Failed to lookup relay details for {}'.format(self.id_hex)
        )
    body = yield readBody(resp)
    data = json.loads(body.decode('ascii'))
    if len(data['relays']) != 1:
        raise RuntimeError(
            'Got multiple relays for {}'.format(self.id_hex)
        )
    relay_data = data['relays'][0]
    if relay_data['fingerprint'].lower() != self.id_hex[1:].lower():
        raise RuntimeError(
            'Expected "{}" but got data for "{}"'.format(
                self.id_hex, relay_data['fingerprint'])
        )
    returnValue(relay_data)
def _pushSuccess(self, result, updateDeferred):
    if result.code >= 200 and result.code < 300:
        updateDeferred.callback(result)
    else:
        d = readBody(result)
        d.addCallback(self._failedPushBodyRead, updateDeferred=updateDeferred)
        d.addErrback(self._pushFailed, updateDeferred=updateDeferred)
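# _failedPushBodyRead is referenced above but not shown. A plausible
# sketch, assuming it simply surfaces the non-2xx response body to
# whoever is waiting on the push (the name comes from the snippet; the
# body is an assumption):

def _failedPushBodyRead(self, body, updateDeferred):
    # Fail the waiting deferred with the error body the server sent back.
    updateDeferred.errback(Exception('push failed: %r' % (body,)))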
def get_json(self, uri, args={}):
    """Gets some JSON from the given host and path.

    Args:
        uri (str): The URI to request, not including query parameters
        args (dict): A dictionary used to create query strings, defaults
            to None.
            **Note**: The value of each key is assumed to be an iterable
            and *not* a string.

    Returns:
        Deferred: Succeeds when we get *any* HTTP response. The result of
        the deferred is the decoded JSON body.
    """
    yield
    if len(args):
        query_bytes = urllib.urlencode(args, True)
        uri = "%s?%s" % (uri, query_bytes)

    response = yield self.agent.request(
        "GET",
        uri.encode("ascii"),
    )

    body = yield readBody(response)

    defer.returnValue(json.loads(body))
def get_json(self, destination, path, args={}):
    if destination in _destination_mappings:
        destination = _destination_mappings[destination]

    logger.debug("get_json args: %s", args)

    retry_on_dns_fail = True
    if HttpClient.RETRY_DNS_LOOKUP_FAILURES in args:
        # FIXME: This isn't ideal, but the interface exposed in get_json
        # isn't comprehensive enough to give callers any control over
        # their connection mechanics.
        retry_on_dns_fail = args.pop(HttpClient.RETRY_DNS_LOOKUP_FAILURES)

    query_bytes = urllib.urlencode(args, True)
    logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)

    response = yield self._create_request(
        destination.encode("ascii"),
        "GET",
        path.encode("ascii"),
        query_bytes=query_bytes,
        retry_on_dns_fail=retry_on_dns_fail
    )

    body = yield readBody(response)

    defer.returnValue(json.loads(body))
def test_do_request(self):
    agent = OnionRoutedAgent(reactor, path=self.random_path(),
                             state=self.tor)
    url = "http://127.0.0.1:{}".format(self.port)
    response = yield agent.request("GET", url)
    body = yield readBody(response)
    yield self.assertEqual(body, 'GET')
def callRemote(self, method, *args):
    logger.debug("remote call to %r, method %r with args %r",
                 self.url, method, args)
    body = json.dumps(args).encode('utf-8')
    uri = self.url + "?method=" + method
    body_p = web.StringBodyProducer(body)
    headers = Headers({b'content-type': [b'application/json']})
    resp = yield self.agent.request(
        'POST', uri, headers=headers, bodyProducer=body_p)
    logger.debug("response code %r from %r", resp.code, uri)
    resp_ct = resp.headers.getRawHeaders(b'content-type', [None])[-1]
    resp_body = yield client.readBody(resp)
    if resp.code != 200:
        raise HttpRPCError(resp.code, resp_body, resp_ct)
    # TODO: read body & parse errors
    if not resp_body:
        raise HttpRPCError(resp.code, resp_body,
                           response_content_type=resp_ct)
    response = json.loads(resp_body)
    defer.returnValue(response)
def _process_response(self, response, file_location, file_id):
    log.msg('Response Code: {}'.format(response.code))
    d = readBody(response)
    d.addCallback(self._process_data, file_location, file_id)
    d.addErrback(self._process_error, file_location, file_id)
    return d
def callRemote(self, method, *args):
    body = xmlrpc.payloadTemplate % (
        method,
        xmlrpclib.dumps(args, allow_none=self.xmlrpclib_allow_none),
    )
    body_p = web.StringBodyProducer(body)
    headers = Headers({
        b'content-type': ['text/xml'],
        b'content-length': [str(len(body))],
    })
    logger.debug("call request to %r, args %r", self.url, args)
    resp = yield self.agent.request(
        'POST', self.url, headers=headers, bodyProducer=body_p)
    logger.debug("response code %r", resp.code)
    resp_ct = resp.headers.getRawHeaders(b'content-type', [None])[-1]
    resp_body = yield client.readBody(resp)
    if resp.code != 200:
        raise HttpRPCError(resp.code, resp_body, resp_ct)
    # TODO: read body & parse errors
    if not resp_body:
        raise HttpRPCError(resp.code, None, resp_body,
                           response_content_type=resp_ct)
    response = xmlrpclib.loads(resp_body,
                               use_datetime=self.xmlrpclib_use_datetime)
    defer.returnValue(response[0][0])
def _make_request(self, method, **params):
    headers = Headers({
        "User-Agent": ["Harold ([email protected])"],
        "Content-Type": ["application/x-www-form-urlencoded"],
    })

    body_data = {"token": self._token}
    body_data.update(params)
    body_producer = FormEncodedBodyProducer(body_data)

    agent = Agent(reactor, pool=self._pool)
    response = yield agent.request(
        "POST",
        "https://slack.com/api/" + method,
        headers,
        body_producer,
    )
    body = yield readBody(response)
    data = json.loads(body)

    if response.code == 429:
        retry_after = int(response.headers.getRawHeaders("Retry-After")[0])
        raise SlackWebClientRatelimitedError(retry_after)

    if not data["ok"]:
        raise SlackWebClientResponseError(data["error"], data)

    warnings = data.get("warnings")
    if warnings:
        # TODO: use real logger
        print("WARNING FROM SLACK: %s" % warnings)

    returnValue(data)
def _do_status_http_request(harold_base_url, harold_secret, salon):
    base_url = urlparse.urlparse(harold_base_url)
    path = posixpath.join(base_url.path, "harold/deploy/status")
    url = urlparse.urlunparse((
        base_url.scheme,
        base_url.netloc,
        path,
        None,
        urllib.urlencode({"salon": salon}),
        None
    ))

    now = str(int(time.time()))
    signature = hmac.new(harold_secret, now, hashlib.sha256).hexdigest()
    signature_header = "%s:%s" % (now, signature)

    agent = Agent(reactor)
    headers = Headers({
        "User-Agent": ["rollingpin"],
        "X-Signature": [signature_header],
    })
    response = yield agent.request("GET", url, headers)

    if response.code != 200:
        raise Exception("harold responded with an error: %d" % response.code)

    body = yield readBody(response)
    returnValue(json.loads(body))
def main(reactor):
    # use port 9051 for system tor instances, or:
    # ep = UNIXClientEndpoint(reactor, '/var/run/tor/control')
    ep = TCP4ClientEndpoint(reactor, '127.0.0.1', default_control_port())
    tor = yield txtorcon.connect(reactor, ep)
    print("Connected to {tor} via localhost:{port}".format(
        tor=tor,
        port=default_control_port(),
    ))

    # create a web.Agent that will talk via Tor. If the socks port
    # given isn't yet configured, this will do so. It may also be
    # None, which means "the first configured SOCKSPort"
    # agent = tor.web_agent(u'9999')
    agent = tor.web_agent()

    # other URIs to try:
    # uri = b'http://surely-this-has-not-been-registered-and-is-invalid.com'
    # uri = b'https://www.torproject.org'
    uri = b'http://timaq4ygg2iegci7.onion/'  # txtorcon documentation
    print("Downloading {}".format(uri))
    resp = yield agent.request(b'GET', uri)

    print("Response has {} bytes".format(resp.length))
    body = yield readBody(resp)
    print("received body ({} bytes)".format(len(body)))
    print("{}\n[...]\n{}\n".format(body[:200], body[-200:]))
def handleMetricResponse(self, response):
    d = readBody(response)
    if 199 < response.code < 300:
        d.addCallback(self.onMetricsFetch)
    else:
        d.addCallback(self.onError)
    return d
def crawl_job_url(delegator_svc, job_id, url, depth):
    """
    Crawl a URL for images. Record any images that we found under the
    job's record in our job store (Redis). If we encounter valid <a href>
    tags, fire off additional crawling announcements for the worker pool
    to tear into together, rather than trying to do it all here.

    :param str job_id: The crawling job's UUID4 string.
    :param str url: The URL to crawl.
    :param int depth: The depth of this crawling job. If it's 0, this is
        the top-level crawl in the job.
    """
    # Abstraction over Twisted's HTTP client. We'll follow redirs, validate
    # SSL certificates, and try to work for most cases.
    response = yield visit_url(url, follow_redirs=True)
    if response.code != 200:
        log.err("URL %s failed with non-200 HTTP code: %d" % (
            url, response.code))
        returnValue(None)

    headers = get_response_headers(response)
    # If this were a production environment, we'd probably want to try to
    # figure out chunked response body parsing. We could end up with some
    # huge body sizes as-is.
    body = yield readBody(response)

    # Look through the response's body for possible images and other links.
    image_urls, links_to_crawl = parse_response(url, headers, body)
    yield record_images_for_url(job_id, url, image_urls)

    # Rather than try to follow the links in the current invocation, hand
    # these off so the work may be distributed across the pool.
    if links_to_crawl and depth < MAX_CRAWL_DEPTH:
        enqueue_crawling_job(delegator_svc, job_id, links_to_crawl,
                             depth=depth + 1)
def get_body(result):
    # Now that we have the response, fetch its body with readBody(),
    # which is also an asynchronous operation.
    d2 = readBody(result)  # get the contents of the page
    d2.addCallback(return_body)
    d2.addErrback(fail)
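# return_body and fail are not defined in the snippet above. A minimal
# sketch of what they might look like (the names come from the snippet;
# the bodies are assumptions):

def return_body(body):
    # readBody() delivers the complete response body as bytes.
    print(body)


def fail(failure):
    # Report any error raised while reading the body.
    print("failed:", failure.getErrorMessage())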
def getResponse(self, response, agent, context, deferred):
    logger.debug("received response %s %s with headers: %s" % (
        response.code, response.phrase, response.headers))
    if response.code != 201:
        raise Exception("received error response from server")
    d = readBody(response)
    d.addCallback(self.processResult, agent, context, deferred)
    d.addErrback(self.handleError, agent, context, deferred)
def handler_request(self, response, requestProcess, bodyProcess):
    request_func, request_args, request_kw = requestProcess
    body_func, body_args, body_kw = bodyProcess
    rs = request_func(response, *request_args, **request_kw)
    d = readBody(response)
    d.addCallback(body_func, *body_args, **body_kw)
    return d
def _createCommitProvisionCB2(self, agent, conn_url, header):
    resp = yield agent.request('GET', conn_url, header)
    data = yield readBody(resp)
    conn_info = json.loads(data)
    self.failUnlessEquals(conn_info['provision_state'], 'Provisioned',
                          'State did not transition to provisioned after provision')
def _read_body(response):
    """
    Read a response body even if there is no content length.
    """
    return readBody(response).addErrback(_extract_partial_response)
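# _extract_partial_response is referenced above but not shown. The usual
# Twisted pattern is to trap PartialDownloadError (raised when the server
# closes the connection without a content length) and keep the bytes that
# did arrive; a sketch under that assumption:

from twisted.web.client import PartialDownloadError


def _extract_partial_response(failure):
    # PartialDownloadError carries the partially downloaded body on
    # failure.value.response; any other error propagates unchanged.
    failure.trap(PartialDownloadError)
    return failure.value.response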
def add_file(self, dirnode_uri, name, uploadable, metadata, overwrite,
             progress):
    size = yield uploadable.get_size()
    contents = b"".join((yield uploadable.read(size)))

    uri = self.node_uri.child(
        u"uri",
    ).to_uri().to_text().encode("ascii")
    action = start_action(
        action_type=u"magic-folder:cli:add_file:put",
        uri=uri,
        size=size,
    )
    with action:
        upload_response = yield self.agent.request(
            b"PUT",
            uri,
            bodyProducer=FileBodyProducer(BytesIO(contents)),
        )

        if upload_response.code != 200:
            raise Exception(
                "Error response from upload endpoint: {code} {phrase}".format(
                    **vars(upload_response)),
            )

        filecap = yield readBody(upload_response)

    uri = self.node_uri.child(
        u"uri",
        dirnode_uri.to_string().decode("ascii"),
        u"",
    ).add(
        u"t",
        u"set-children",
    ).add(
        u"overwrite",
        u"true" if overwrite else u"false",
    ).to_uri().to_text().encode("ascii")
    action = start_action(
        action_type=u"magic-folder:cli:add_file:metadata",
        uri=uri,
    )
    with action:
        response = yield self.agent.request(
            b"POST",
            uri,
            bodyProducer=FileBodyProducer(
                BytesIO(
                    json.dumps({
                        name: [
                            u"filenode",
                            {
                                "ro_uri": filecap,
                                "size": size,
                                "metadata": metadata,
                            },
                        ],
                    }).encode("utf-8"),
                ),
            ),
        )
        if response.code != 200:
            raise Exception(
                "Error response from metadata endpoint: {code} {phrase}".format(
                    **vars(response)))

    returnValue(Node(self, from_string(filecap)))
def put_json(self, destination, path, data={}, json_data_callback=None,
             long_retries=False, timeout=None, ignore_backoff=False,
             backoff_on_404=False):
    """ Sends the specified json data using PUT

    Args:
        destination (str): The remote server to send the HTTP request
            to.
        path (str): The HTTP path.
        data (dict): A dict containing the data that will be used as
            the request body. This will be encoded as JSON.
        json_data_callback (callable): A callable returning the dict to
            use as the request body.
        long_retries (bool): A boolean that indicates whether we should
            retry for a short or long time.
        timeout(int): How long to try (in ms) the destination for before
            giving up. None indicates no timeout.
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404 (bool): True if we should count a 404 response as
            a failure of the server (and should therefore back off future
            requests)

    Returns:
        Deferred: Succeeds when we get a 2xx HTTP response. The result
        will be the decoded JSON body. On a 4xx or 5xx error response a
        CodeMessageException is raised.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.
    """
    if not json_data_callback:
        def json_data_callback():
            return data

    def body_callback(method, url_bytes, headers_dict):
        json_data = json_data_callback()
        self.sign_request(destination, method, url_bytes, headers_dict,
                          json_data)
        producer = _JsonProducer(json_data)
        return producer

    response = yield self._request(
        destination,
        "PUT",
        path,
        body_callback=body_callback,
        headers_dict={"Content-Type": ["application/json"]},
        long_retries=long_retries,
        timeout=timeout,
        ignore_backoff=ignore_backoff,
        backoff_on_404=backoff_on_404,
    )

    if 200 <= response.code < 300:
        # We need to update the transactions table to say it was sent?
        check_content_type_is_json(response.headers)

    with logcontext.PreserveLoggingContext():
        body = yield readBody(response)

    defer.returnValue(json.loads(body))
def _parse_response(self, response):
    # TODO: Better exception than this.
    assert response.code in (200, 201)
    return readBody(response).addCallback(json.loads)
def _on_response(response):
    if response.code == http.OK:
        return readBody(response)
    raise HttpError(response)
def cb(resp):
    d = readBody(resp)
    d.addCallback(partial(f, what))
    return d
def post_json(self, destination, path, data={}, long_retries=False,
              timeout=None, ignore_backoff=False, args={}):
    """ Sends the specified json data using POST

    Args:
        destination (str): The remote server to send the HTTP request
            to.
        path (str): The HTTP path.
        data (dict): A dict containing the data that will be used as
            the request body. This will be encoded as JSON.
        long_retries (bool): A boolean that indicates whether we should
            retry for a short or long time.
        timeout(int): How long to try (in ms) the destination for before
            giving up. None indicates no timeout.
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.
        args (dict): query params

    Returns:
        Deferred: Succeeds when we get a 2xx HTTP response. The result
        will be the decoded JSON body.

        Fails with ``HTTPRequestException`` if we get an HTTP response
        code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.

        Fails with ``FederationDeniedError`` if this destination
        is not on our federation whitelist
    """

    def body_callback(method, url_bytes, headers_dict):
        self.sign_request(destination, method, url_bytes, headers_dict,
                          data)
        return _JsonProducer(data)

    response = yield self._request(
        destination,
        "POST",
        path,
        query_bytes=encode_query_args(args),
        body_callback=body_callback,
        headers_dict={"Content-Type": ["application/json"]},
        long_retries=long_retries,
        timeout=timeout,
        ignore_backoff=ignore_backoff,
    )

    if 200 <= response.code < 300:
        # We need to update the transactions table to say it was sent?
        check_content_type_is_json(response.headers)

    with logcontext.PreserveLoggingContext():
        body = yield readBody(response)

    defer.returnValue(json.loads(body))
def _request(self, destination, method, path, body_callback,
             headers_dict={}, param_bytes=b"", query_bytes=b"",
             retry_on_dns_fail=True, timeout=None, long_retries=False,
             ignore_backoff=False, backoff_on_404=False):
    """ Creates and sends a request to the given server

    Args:
        destination (str): The remote server to send the HTTP request
            to.
        method (str): HTTP method
        path (str): The HTTP path
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HTTPRequestException``: if we get an HTTP response
        code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.

        Fails with ``FederationDeniedError`` if this destination
        is not on our federation whitelist

        (May also fail with plenty of other Exceptions for things like DNS
        failures, connection failures, SSL failures.)
    """
    if (self.hs.config.federation_domain_whitelist and
            destination not in self.hs.config.federation_domain_whitelist):
        raise FederationDeniedError(destination)

    limiter = yield synapse.util.retryutils.get_retry_limiter(
        destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    destination = destination.encode("ascii")
    path_bytes = path.encode("ascii")
    with limiter:
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = self._create_url(
            destination, path_bytes, param_bytes, query_bytes
        )

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (sys.maxint - 1)

        outbound_logger.info(
            "{%s} [%s] Sending request: %s %s",
            txn_id, destination, method, url_bytes
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        http_url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "")
        )

        log_result = None
        try:
            while True:
                producer = None
                if body_callback:
                    producer = body_callback(method, http_url_bytes,
                                             headers_dict)

                try:
                    request_deferred = self.agent.request(
                        method,
                        url_bytes,
                        Headers(headers_dict),
                        producer
                    )
                    add_timeout_to_deferred(
                        request_deferred,
                        timeout / 1000. if timeout else 60,
                        cancelled_to_request_timed_out_error,
                    )
                    response = yield make_deferred_yieldable(
                        request_deferred,
                    )

                    log_result = "%d %s" % (response.code, response.phrase)
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn("DNS Lookup failed to %s with %s",
                                    destination, e)
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e)
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s",
                        txn_id,
                        destination,
                        method,
                        url_bytes,
                        _flatten_response_never_received(e),
                    )

                    log_result = _flatten_response_never_received(e)

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        yield sleep(delay)
                        retries_left -= 1
                    else:
                        raise
        finally:
            outbound_logger.info(
                "{%s} [%s] Result: %s",
                txn_id, destination, log_result,
            )

    if 200 <= response.code < 300:
        pass
    else:
        # :'(
        # Update transactions table?
        with logcontext.PreserveLoggingContext():
            body = yield readBody(response)
        raise HttpResponseException(
            response.code, response.phrase, body
        )

    defer.returnValue(response)
def get_json(self, destination, path, args={}, retry_on_dns_fail=True,
             timeout=None, ignore_backoff=False):
    """ GETs some json from the given homeserver and path

    Args:
        destination (str): The remote server to send the HTTP request
            to.
        path (str): The HTTP path.
        args (dict): A dictionary used to create query strings, defaults
            to None.
        timeout (int): How long to try (in ms) the destination for before
            giving up. None indicates no timeout and that the request will
            be retried.
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.

    Returns:
        Deferred: Succeeds when we get *any* HTTP response. The result of
        the deferred is the decoded JSON body.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.
    """
    logger.debug("get_json args: %s", args)

    encoded_args = {}
    for k, vs in args.items():
        if isinstance(vs, basestring):
            vs = [vs]
        encoded_args[k] = [v.encode("UTF-8") for v in vs]

    query_bytes = urllib.urlencode(encoded_args, True)
    logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)

    def body_callback(method, url_bytes, headers_dict):
        self.sign_request(destination, method, url_bytes, headers_dict)
        return None

    response = yield self._request(
        destination,
        "GET",
        path,
        query_bytes=query_bytes,
        body_callback=body_callback,
        retry_on_dns_fail=retry_on_dns_fail,
        timeout=timeout,
        ignore_backoff=ignore_backoff,
    )

    if 200 <= response.code < 300:
        # We need to update the transactions table to say it was sent?
        check_content_type_is_json(response.headers)

    with logcontext.PreserveLoggingContext():
        body = yield readBody(response)

    defer.returnValue(json.loads(body))
def readResponseBody(response):
    try:
        body = yield readBody(response)
    except Exception as e:
        print(e)
    else:
        # hand the body back to the inlineCallbacks caller
        defer.returnValue(body)
def cb_read_body(response):
    if response.code != 200 and response.code != 500:
        # print('response :', response.code)
        return
    d = readBody(response)
    d.addCallback(callback, *args)
def _check(resp):
    self.failUnlessEqual(resp.code, 200)
    return readBody(resp)
def process_response(response):
    code = response.code

    def process_body(text_result):
        """Process JSON-RPC batch response body"""
        # pylint: disable=broad-except, unused-variable
        try:
            # Parse the JSON content of the JSON-RPC batch response.
            resp_obj = json.loads(text_result)
        except json.decoder.JSONDecodeError as exception:
            process_json_parse_error(code, text_result.decode())
            resp_obj = None
        if resp_obj:
            # Assert the parsed JSON is a list.
            if not isinstance(resp_obj, list):
                process_batch_level_exception(
                    JsonRpcBatchError(
                        code,
                        text_result.decode(),
                        "Non-batch JSON response from server."))
                resp_obj = []
            else:
                # Process the individual command responses.
                for response in resp_obj:
                    # Get the id from the response to match it with the
                    # appropriate request.
                    if "id" in response and response["id"] in unprocessed:
                        query_id = response["id"]
                        # Maintain the list of unprocessed ids.
                        unprocessed.remove(query_id)
                        # Look up the proper command future to map this
                        # response to.
                        query_deferred = deferreds_map[query_id]
                        # Distinguish between responses and errors.
                        if "result" in response:
                            # Set future result.
                            query_deferred.callback(response["result"])
                        if (not "result" in response) and "error" in response and \
                                "message" in response["error"] and \
                                "code" in response["error"] and \
                                "data" in response["error"]:
                            query_deferred.errback(
                                JsonRpcCommandError(
                                    response["error"]["code"],
                                    response["error"]["message"],
                                    response["error"]["data"]))
                        if (not "result" in response) and (not "error" in response):
                            query_deferred.errback(
                                JsonRpcCommandResponseError(
                                    "Bad command response from server",
                                    response))
                # Work through any request item id not found in the response.
                for no_valid_response_id in unprocessed:
                    query_deferred = deferreds_map[no_valid_response_id]
                    query_deferred.errback(
                        JsonRpcCommandResponseError(
                            "Request command id not found in response.",
                            resp_obj))
        self._fetch_batch()

    # Get (text) content from the server response.
    body_deferred = readBody(response)
    body_deferred.addCallbacks(process_body, process_batch_level_exception)
    body_deferred.addBoth(self._fetch_batch)
    return body_deferred
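# For context, process_response above expects a JSON-RPC 2.0 *batch*
# response: a JSON list in which each entry carries the "id" of the
# request it answers. A hypothetical sketch of sending such a batch
# (deferreds_map and unprocessed mirror the names in the snippet; the
# send_batch helper, endpoint URL, and command list are made up):

import json
from io import BytesIO

from twisted.internet import defer
from twisted.web.client import FileBodyProducer
from twisted.web.http_headers import Headers


def send_batch(agent, url, commands):
    # Build one JSON-RPC 2.0 request object per command, keyed by id.
    batch = [
        {"jsonrpc": "2.0", "method": method, "params": params, "id": idx}
        for idx, (method, params) in enumerate(commands)
    ]
    deferreds_map = {req["id"]: defer.Deferred() for req in batch}
    unprocessed = [req["id"] for req in batch]

    body = FileBodyProducer(BytesIO(json.dumps(batch).encode("utf-8")))
    headers = Headers({b"content-type": [b"application/json"]})
    d = agent.request(b"POST", url, headers=headers, bodyProducer=body)
    # The response would then be handed to process_response above.
    return d, deferreds_map, unprocessed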
def __cb_response(response, deferred):
    d = readBody(response)
    d.addCallback(__cb_received_body, deferred=deferred)
def getPage(self, url: str) -> Iterator[Deferred]:
    resp = yield self.http_agent.request(b'GET', url.encode())
    body = yield web_client.readBody(resp)
    return body.decode()
def parse_response(self, response, method='POST'):
    log.info('{} successful'.format(method))
    d = readBody(response)
    d.addCallback(self._cb_parse_response)
    return d
def read_child_web(self, content, name):
    print('read_child_web', name)
    d = readBody(content)
    d.addCallback(self.print_child_web)
    return d
def cbResponse(response):
    return readBody(response)
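# A minimal driver for the callback above, following the standard Twisted
# Agent pattern (the URL is a placeholder; everything else is stock
# twisted.web.client API):

from twisted.internet import reactor
from twisted.web.client import Agent, readBody


def cbBody(body):
    print("got %d bytes" % len(body))


agent = Agent(reactor)
d = agent.request(b'GET', b'http://example.com/')
d.addCallback(cbResponse)   # returns the readBody() Deferred
d.addCallback(cbBody)       # fires with the full body bytes
d.addBoth(lambda _: reactor.stop())
reactor.run()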
async def _exchange_code(self, code: str) -> Token:
    """Exchange an authorization code for a token.

    This calls the ``token_endpoint`` with the authorization code we
    received in the callback to exchange it for a token. The call uses
    the ``ClientAuth`` to authenticate with the client with its ID and
    secret.

    See:
        https://tools.ietf.org/html/rfc6749#section-3.2
        https://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint

    Args:
        code: The authorization code we got from the callback.

    Returns:
        A dict containing various tokens.

        May look like this::

            {
                'token_type': 'bearer',
                'access_token': 'abcdef',
                'expires_in': 3599,
                'id_token': 'ghijkl',
                'refresh_token': 'mnopqr',
            }

    Raises:
        OidcError: when the ``token_endpoint`` returned an error.
    """
    metadata = await self.load_metadata()
    token_endpoint = metadata.get("token_endpoint")
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "User-Agent": self._http_client.user_agent,
        "Accept": "application/json",
    }

    args = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": self._callback_url,
    }
    body = urlencode(args, True)

    # Fill the body/headers with credentials
    uri, headers, body = self._client_auth.prepare(
        method="POST", uri=token_endpoint, headers=headers, body=body
    )
    headers = {k: [v] for (k, v) in headers.items()}

    # Do the actual request
    # We're not using the SimpleHttpClient util methods as we don't want to
    # check the HTTP status code and we do the body encoding ourself.
    response = await self._http_client.request(
        method="POST",
        uri=uri,
        data=body.encode("utf-8"),
        headers=headers,
    )

    # This is used in multiple error messages below
    status = "{code} {phrase}".format(
        code=response.code, phrase=response.phrase.decode("utf-8")
    )

    resp_body = await make_deferred_yieldable(readBody(response))

    if response.code >= 500:
        # In case of a server error, we should first try to decode the body
        # and check for an error field. If not, we respond with a generic
        # error message.
        try:
            resp = json_decoder.decode(resp_body.decode("utf-8"))
            error = resp["error"]
            description = resp.get("error_description", error)
        except (ValueError, KeyError):
            # Catch ValueError for the JSON decoding and KeyError for the
            # "error" field
            error = "server_error"
            description = (
                'Authorization server responded with a "{status}" error '
                "while exchanging the authorization code."
            ).format(status=status)

        raise OidcError(error, description)

    # Since it is not a 5xx code, body should be a valid JSON. It will
    # raise if not.
    resp = json_decoder.decode(resp_body.decode("utf-8"))

    if "error" in resp:
        error = resp["error"]

        # In case the authorization server responded with an error field,
        # it should be a 4xx code. If not, warn about it but don't do
        # anything special and report the original error message.
        if response.code < 400:
            logger.debug(
                "Invalid response from the authorization server: "
                'responded with a "{status}" '
                "but body has an error field: {error!r}".format(
                    status=status, error=resp["error"]
                )
            )

        description = resp.get("error_description", error)
        raise OidcError(error, description)

    # Now, this should not be an error. According to RFC6749 sec 5.1, it
    # should be a 200 code. We're a bit more flexible than that, and will
    # only throw on a 4xx code.
    if response.code >= 400:
        description = (
            'Authorization server responded with a "{status}" error '
            'but did not include an "error" field in its response.'.format(
                status=status
            )
        )
        logger.warning(description)
        # Body was still valid JSON. Might be useful to log it for
        # debugging.
        logger.warning("Code exchange response: {resp!r}".format(resp=resp))
        raise OidcError("server_error", description)

    return resp
def response_handler(self, response, handler):
    body = yield readBody(response)
    # handlers check the body is as expected; no return.
    yield handler(body, response.code)
    return True
def _callback(self, response, user_callback, data, headers):
    d = readBody(response)
    d.addCallback(self._onBody, response, user_callback, data, headers)
    return d
def callback_request(response):
    d = readBody(response)
    d.addCallback(callback_body)
    return d
def test_https_request_via_proxy(self):
    agent = ProxyAgent(
        self.reactor,
        contextFactory=get_test_https_policy(),
        proxy_url_str="http://proxy.com:1080",
    )

    self.reactor.lookups["proxy.com"] = "1.2.3.5"
    d = agent.request(b"GET", b"https://test.com/abc")

    # there should be a pending TCP connection
    clients = self.reactor.tcpClients
    self.assertEqual(len(clients), 1)
    (host, port, client_factory, _timeout, _bindAddress) = clients[0]
    self.assertEqual(host, "1.2.3.5")
    self.assertEqual(port, 1080)

    # make a test HTTP server, and wire up the client
    proxy_server = self._make_connection(
        client_factory, _get_test_protocol_factory()
    )

    # fish the transports back out so that we can do the old switcheroo
    s2c_transport = proxy_server.transport
    client_protocol = s2c_transport.other
    c2s_transport = client_protocol.transport

    # the FakeTransport is async, so we need to pump the reactor
    self.reactor.advance(0)

    # now there should be a pending CONNECT request
    self.assertEqual(len(proxy_server.requests), 1)

    request = proxy_server.requests[0]
    self.assertEqual(request.method, b"CONNECT")
    self.assertEqual(request.path, b"test.com:443")

    # tell the proxy server not to close the connection
    proxy_server.persistent = True

    # this just stops the http Request trying to do a chunked response
    # request.setHeader(b"Content-Length", b"0")
    request.finish()

    # now we can replace the proxy channel with a new, SSL-wrapped HTTP
    # channel
    ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory())
    ssl_protocol = ssl_factory.buildProtocol(None)
    http_server = ssl_protocol.wrappedProtocol

    ssl_protocol.makeConnection(
        FakeTransport(client_protocol, self.reactor, ssl_protocol)
    )
    c2s_transport.other = ssl_protocol
    self.reactor.advance(0)

    server_name = ssl_protocol._tlsConnection.get_servername()
    expected_sni = b"test.com"
    self.assertEqual(
        server_name,
        expected_sni,
        "Expected SNI %r but got %r" % (expected_sni, server_name),
    )

    # now there should be a pending request
    self.assertEqual(len(http_server.requests), 1)

    request = http_server.requests[0]
    self.assertEqual(request.method, b"GET")
    self.assertEqual(request.path, b"/abc")
    self.assertEqual(request.requestHeaders.getRawHeaders(b"host"),
                     [b"test.com"])
    request.write(b"result")
    self.reactor.advance(0)
    request.finish()

    self.reactor.advance(0)
    resp = self.successResultOf(d)
    body = self.successResultOf(readBody(resp))
    self.assertEqual(body, b"result")
def _create_request(self, destination, method, path_bytes, body_callback,
                    headers_dict={}, param_bytes=b"", query_bytes=b"",
                    retry_on_dns_fail=True):
    """ Creates and sends a request to the given url
    """
    headers_dict[b"User-Agent"] = [self.version_string]
    headers_dict[b"Host"] = [destination]

    url_bytes = urlparse.urlunparse(
        ("", "", path_bytes, param_bytes, query_bytes, "",)
    )

    logger.info("Sending request to %s: %s %s",
                destination, method, url_bytes)

    logger.debug(
        "Types: %s",
        [
            type(destination), type(method), type(path_bytes),
            type(param_bytes), type(query_bytes)
        ]
    )

    # XXX: Would be much nicer to retry only at the transaction-layer
    # (once we have reliable transactions in place)
    retries_left = 5

    endpoint = self._getEndpoint(reactor, destination)

    while True:
        producer = None
        if body_callback:
            producer = body_callback(method, url_bytes, headers_dict)

        try:
            with PreserveLoggingContext():
                request_deferred = self.agent.request(
                    destination,
                    endpoint,
                    method,
                    path_bytes,
                    param_bytes,
                    query_bytes,
                    Headers(headers_dict),
                    producer
                )

                response = yield self.clock.time_bound_deferred(
                    request_deferred,
                    time_out=60,
                )

            logger.debug("Got response to %s", method)
            break
        except Exception as e:
            if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                logger.warn("DNS Lookup failed to %s with %s",
                            destination, e)
                raise

            logger.warn(
                "Sending request failed to %s: %s %s: %s - %s",
                destination, method, url_bytes, type(e).__name__,
                _flatten_response_never_received(e),
            )

            if retries_left:
                yield sleep(2 ** (5 - retries_left))
                retries_left -= 1
            else:
                raise

    logger.info(
        "Received response %d %s for %s: %s %s",
        response.code, response.phrase, destination, method, url_bytes
    )

    if 200 <= response.code < 300:
        pass
    else:
        # :'(
        # Update transactions table?
        body = yield readBody(response)
        raise HttpResponseException(
            response.code, response.phrase, body
        )

    defer.returnValue(response)
def response_received(self, response):
    return readBody(response)
def run(reactor, cfg, tor, package_name, package_version):
    agent = tor.web_agent()
    state = yield tor.create_state()

    # download metadata from PyPI over "any" circuit
    uri = b'https://pypi.python.org/pypi/{}/json'.format(package_name)
    print("downloading: '{}'".format(uri))
    resp = yield agent.request(b'GET', uri)
    data = yield readBody(resp)
    data = json.loads(data)

    # did we get a valid sdist URL somewhere?
    sdist_url = None
    version = None
    if package_version is None:
        # print data['releases'].keys()
        available = [LooseVersion(x) for x in data['releases'].keys()]
        package_version = str(sorted(available)[-1])
        print("Using version: {}".format(package_version))

    for url in data['releases'][package_version]:
        if url['packagetype'] == 'sdist':
            sdist_url = url['url'].encode('UTF8')
            version = url['filename']

    if sdist_url is None:
        print("Error: couldn't find any 'sdist' URLs")
        raise RuntimeError("No sdist URL")
    else:
        print("Found sdist: {} for {}".format(sdist_url, version))

    # download the distribution over several different circuits,
    # and record the sha256 hash each time.
    digests = []
    while len(digests) < 3:
        circ = yield state.build_circuit()
        try:
            yield circ.when_built()
        except Exception:
            print("Circuit failed; trying another.")
            continue
        print(
            "Built circuit: {}".format(
                ' -> '.join([r.ip for r in circ.path]),
            )
        )
        try:
            agent = circ.web_agent(reactor, tor._default_socks_endpoint())
            resp = yield agent.request(b'GET', sdist_url)
            # FIXME could stream this to the hasher with a custom
            # protocol, but teh RAMz they are cheap
            tarball = yield readBody(resp)
        except txsocksx.errors.TTLExpired as e:
            print("Timed out: {}".format(e))
            continue
        except Exception as e:
            print("Something went wrong: {}".format(e))
            continue

        hasher = hashlib.new('sha256')
        hasher.update(tarball)
        # the whole point is to match peep's hashes, and this is
        # exactly what it does:
        digest = urlsafe_b64encode(hasher.digest()).decode('ascii').rstrip('=')
        digests.append((sdist_url, circ, digest))
        print("sha256:", digest)

    print("Found hashes:")
    feel_fear = False
    for (url, circ, digest) in digests:
        print(digest)
        if digest != digests[0][-1]:
            print("Fearsome Warning! Mismatched digest!!")
            feel_fear = True
            print("Circuit:", '->'.join(map(lambda r: r.hex_id, circ.path)))

    if feel_fear:
        print("****\n Something fishy!\n****")
    else:
        print("Add this to requirements.txt for peep:")
        print()
        print("# sha256: {}".format(digests[0][-1]))
        print("{}=={}".format(package_name, package_version))
    return
def receive_pow_target(self, response, server):
    d = readBody(response)
    d.addCallback(self.process_pow_target, server)
def assert_response(self, response, code, body):
    self.assertEqual(response.code, code)
    response_body = yield readBody(response)
    self.assertEqual(response_body, body)
def cbResponse(response):
    print('Response received')
    # print(response)
    d = readBody(response)
    d.addCallback(cbBody)