def weibo_request(self, path, callback, access_token=None, expires_in=None, post_args=None, **args): url = "https://api.weibo.com/2/" + path + ".json" all_args = {} if access_token: all_args['access_token'] = access_token all_args.update(args) all_args.update(post_args or {}) header = HTTPHeaders({'Authorization': 'OAuth2 %s' % access_token}) callback = self.async_callback(self._on_weibo_request, callback) http = httpclient.AsyncHTTPClient() if post_args is not None: has_file = False for key,value in post_args.iteritems(): if hasattr(value,"read"): has_file = True if has_file: post_args,boundary = encode_multipart(post_args) header.add('Content-Type', 'multipart/form-data; boundary=%s' %boundary) header.add('Content-Length', len(post_args)) http.fetch(url, method="POST", body=post_args, callback=callback,headers=header) else: http.fetch(url, method="POST", body=urllib.urlencode(all_args), callback=callback,headers=header) else: if all_args: url += "?" + urllib.urlencode(all_args) http.fetch(url, callback=callback,headers=header)
def request_to_curl_string(request): def _escape_apos(s): return s.replace("'", "'\"'\"'") try: if request.body: request.body.decode('ascii') is_binary_data = False except UnicodeError: is_binary_data = True curl_headers = HTTPHeaders(request.headers) if request.body and 'Content-Length' not in curl_headers: curl_headers['Content-Length'] = len(request.body) if is_binary_data: curl_echo_data = "echo -e {} |".format(repr(request.body)) curl_data_string = '--data-binary @-' else: curl_echo_data = '' curl_data_string = "--data '{}'".format(_escape_apos(str(request.body))) if request.body else '' return "{echo} curl -X {method} '{url}' {headers} {data}".format( echo=curl_echo_data, method=request.method, url=request.url, headers=' '.join("-H '{}: {}'".format(k, _escape_apos(str(v))) for k, v in curl_headers.items()), data=curl_data_string ).strip()
def post(self, param):
    targetURL = self.get_argument('url')
    if DEBUG:
        print "target URL: " + targetURL
    try:
        serverURL = self.request.protocol + '://' + self.request.host
        http_client = AsyncHTTPClient()
        sub = yield http_client.fetch(targetURL, validate_cert=False)
        sub_filename = targetURL[targetURL.rfind('/'):]
        sub_filename = "fornow"  # TODO - the URL doesn't have to end with a filename, is it worth keeping?
        files = []
        files.append((sub_filename, sub_filename, sub.body))
        fields = []
        fields.append(("_xsrf", self.xsrf_token))
        content_type, body = encode_multipart_formdata(fields, files)
        headers = HTTPHeaders({"Content-Type": content_type, 'content-length': str(len(body))})
        headers.add("Cookie", "_xsrf=" + self.xsrf_token)
        request = HTTPRequest(serverURL + "/import/", "POST", headers=headers, body=body, validate_cert=False)
        response = yield http_client.fetch(request)
        self.write(response.body)
    except Exception, e:
        print 'Failed to upload from URL (DocumentWrapperHandler)', e
        self.write("Failed to upload from '" + targetURL + "'")
    # finish() already flushes pending output; calling flush() after finish() would raise.
    self.finish()
def compose_response(self):
    headers = HTTPHeaders()
    headers = self.process_headers(headers)
    lines = []
    lines.append("HTTP/1.1 %d %s" % (self.response.code, responses[self.response.code]))
    for k, v in headers.get_all():
        lines.append(k + ": " + v)
    head = "\r\n".join(lines) + "\r\n\r\n"
    head = head.encode("ascii")
    body = self.process_body(self.response.body)
    if body is not None:
        # Return the processed body, not the raw response body.
        return head + body
    else:
        return head
def _prepare_request(self, messages): # Determine the URL for the messages url = self.url if self._append_message_type and len(messages) == 1 and messages[0].channel.is_meta(): message_type = '/'.join(messages[0].channel.parts()[1:]) if not url.endswith('/'): url += '/' url += message_type # Get the headers for the request headers = HTTPHeaders() for header, values in self.get_headers().iteritems(): for value in values: headers.add(header, value) for header, value in headers.get_all(): self.log.debug('Request header %s: %s' % (header, value)) # Get the body for the request body = Message.to_json(messages, encoding='utf8') self.log.debug('Request body (length: %d): %s' % (len(body), body)) # Get the timeout (in seconds) timeout = self.get_timeout(messages) / 1000.0 self.log.debug('Request timeout: %ss' % timeout) # Build and return the request return HTTPRequest( url, method='POST', headers=headers, body=body, connect_timeout=timeout, request_timeout=timeout )
def _delete(self, url, headers=None, callback=None):
    h = HTTPHeaders()
    h.update(self._default_headers)
    if headers:
        h.update(headers)
    # Pass the merged headers, not the caller-supplied ones.
    req = HTTPRequest(url, headers=h, method="DELETE")
    self._client.fetch(req, callback)
def request_to_curl_string(request): def _escape_apos(string): return string.replace("'", "'\"'\"'") try: request_body = _escape_apos(request.body.decode('ascii')) if request.body else None is_binary_body = False except UnicodeError: request_body = repr(request.body).strip('b') is_binary_body = True curl_headers = HTTPHeaders(request.headers) if request.body and 'Content-Length' not in curl_headers: curl_headers['Content-Length'] = len(request.body) if is_binary_body: curl_echo_data = f'echo -e {request_body} |' curl_data_string = '--data-binary @-' else: curl_echo_data = '' curl_data_string = f"--data '{request_body}'" if request_body else '' def _format_header(key): header_value = frontik.util.any_to_unicode(curl_headers[key]) return f"-H '{key}: {_escape_apos(header_value)}'" return "{echo} curl -X {method} '{url}' {headers} {data}".format( echo=curl_echo_data, method=request.method, url=to_unicode(request.url), headers=' '.join(_format_header(k) for k in sorted(curl_headers.keys())), data=curl_data_string ).strip()
def send_object(cls, object_url): """ Sends an OpenSlides object to all connected clients (waiters). First, retrieve the object from the OpenSlides REST api using the given object_url. """ # Join network location with object URL. # TODO: Use host and port as given in the start script wsgi_network_location = settings.OPENSLIDES_WSGI_NETWORK_LOCATION or 'http://localhost:8000' url = ''.join((wsgi_network_location, object_url)) # Send out internal HTTP request to get data from the REST api. for waiter in cls.waiters: # Read waiter's former cookies and parse session cookie to new header object. headers = HTTPHeaders() try: session_cookie = waiter.connection_info.cookies[settings.SESSION_COOKIE_NAME] except KeyError: # There is no session cookie pass else: headers.add('Cookie', '%s=%s' % (settings.SESSION_COOKIE_NAME, session_cookie.value)) # Setup uncompressed request. request = HTTPRequest( url=url, headers=headers, decompress_response=False) # Setup non-blocking HTTP client http_client = AsyncHTTPClient() # Executes the request, asynchronously returning an HTTPResponse # and calling waiter's forward_rest_response() method. http_client.fetch(request, waiter.forward_rest_response)
def execute(self): url = self._make_url('/images/{0}/push'.format(self.name)) registry, name = resolve_repository_name(self.name) headers = HTTPHeaders() headers.add(REGISTRY_AUTH_HEADER, self._prepare_auth_header_value()) body = '' log.info('Pushing "%s" into "%s"... ', name, registry) log.debug('Pushing url: %s', url) request = HTTPRequest(url, method='POST', headers=headers, body=body, allow_ipv6=True, request_timeout=self.timeout, streaming_callback=self._on_body) try: result = yield self._http_client.fetch(request) if self._lasterr is not None: raise self._lasterr log.info('OK') except Exception as err: log.error('FAIL - %s', err) raise err raise gen.Return(result)
def _clean_headers(self):
    """
    Clean up unneeded request headers and rewrite values.
    :return: the filtered HTTPHeaders to send to the backend
    """
    headers = self.request.headers
    # Rewrite the Host header to the backend site's host.
    headers['Host'] = self.client.request.endpoint['netloc']
    new_headers = HTTPHeaders()
    # If some header names are str and others unicode, a 422 error can occur,
    # so normalize everything to text.
    for name, value in headers.get_all():
        l_name = name.lower()
        # These headers must still be passed through to the backend.
        required_headers = ['x-api-user-json', 'x-api-access-key']
        if l_name.startswith('x-api-') and l_name not in required_headers:
            # Drop x-api-* headers; they are only meant for the API gateway.
            pass
        elif l_name == 'content-length':
            # Do not forward Content-Length; it is recalculated automatically.
            # A wrong Content-Length breaks the backend request: too large
            # causes timeouts, too small truncates the body.
            pass
        else:
            new_headers.add(text_type(name), text_type(value))
    return new_headers
def headers_parse_simple(headers: str) -> HTTPHeaders:
    h = HTTPHeaders()
    for line in headers.split("\n"):
        if line.endswith("\r"):
            line = line[:-1]
        if line:
            h.parse_line(line)
    return h
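# A minimal usage sketch (not from the original sources; the header values are
# illustrative). headers_parse_simple above accepts a raw header blob with
# either \n or \r\n line endings and returns a Tornado HTTPHeaders object,
# whose lookups are case-insensitive.
raw = "Content-Type: text/plain\r\nX-Trace-Id: abc123\r\n"
parsed = headers_parse_simple(raw)
assert parsed["content-type"] == "text/plain"
assert parsed.get("x-trace-id") == "abc123"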
def parse_headers(data):
    headers = HTTPHeaders()
    for line in data.splitlines():
        if line:
            try:
                headers.parse_line(line)
            except Exception:
                # Stop at the first malformed header line.
                break
    return headers
def test_setdefault(self): headers = HTTPHeaders() headers["foo"] = "bar" # If a value is present, setdefault returns it without changes. self.assertEqual(headers.setdefault("foo", "baz"), "bar") self.assertEqual(headers["foo"], "bar") # If a value is not present, setdefault sets it for future use. self.assertEqual(headers.setdefault("quux", "xyzzy"), "xyzzy") self.assertEqual(headers["quux"], "xyzzy") self.assertEqual(sorted(headers.get_all()), [("Foo", "bar"), ("Quux", "xyzzy")])
def test_setdefault(self): headers = HTTPHeaders() headers['foo'] = 'bar' # If a value is present, setdefault returns it without changes. self.assertEqual(headers.setdefault('foo', 'baz'), 'bar') self.assertEqual(headers['foo'], 'bar') # If a value is not present, setdefault sets it for future use. self.assertEqual(headers.setdefault('quux', 'xyzzy'), 'xyzzy') self.assertEqual(headers['quux'], 'xyzzy') self.assertEqual(sorted(headers.get_all()), [('Foo', 'bar'), ('Quux', 'xyzzy')])
def test_urllib2(scheme, root_span, install_hooks): request = urllib2.Request('%s://localhost:9777/proxy' % scheme, headers={'Remote-LOC': 'New New York', 'Remote-Op': 'antiquing'}) class Response(object): def __init__(self): self.code = 200 self.msg = '' def info(self): return None if root_span: root_span = mock.MagicMock() root_span.context = mock.MagicMock() root_span.finish = mock.MagicMock() root_span.__exit__ = mock.MagicMock() else: root_span = None span = mock.MagicMock() span.set_tag = mock.MagicMock() span.finish = mock.MagicMock() def inject(span_context, format, carrier): carrier['TRACE-ID'] = '123' p_do_open = mock.patch('urllib2.AbstractHTTPHandler.do_open', return_value=Response()) p_start_span = mock.patch.object(opentracing.tracer, 'start_span', return_value=span) p_inject = mock.patch.object(opentracing.tracer, 'inject', side_effect=inject) p_current_span = span_in_context(span=root_span) with p_do_open, p_start_span as start_call, p_inject, p_current_span: resp = urllib2.urlopen(request) expected_references = root_span.context if root_span else None start_call.assert_called_once_with( operation_name='GET:antiquing', child_of=expected_references, tags=None, ) assert resp is not None span.set_tag.assert_any_call('span.kind', 'client') assert span.__enter__.call_count == 1 assert span.__exit__.call_count == 1, 'ensure finish() was called' if root_span: assert root_span.__exit__.call_count == 0, 'do not finish root span' # verify trace-id was correctly injected into headers norm_headers = HTTPHeaders(request.headers) assert norm_headers.get('trace-id') == '123'
def _parse_headers(self): frame = self._header_frames[0] data = b''.join(f.data for f in self._header_frames) self._header_frames = [] if frame.flags & constants.FrameFlag.PRIORITY: # TODO: support PRIORITY and PADDING. # This is just enough to cover an error case tested in h2spec. stream_dep, weight = struct.unpack('>ib', data[:5]) data = data[5:] # strip off the "exclusive" bit stream_dep = stream_dep & 0x7fffffff if stream_dep == frame.stream_id: raise ConnectionError(constants.ErrorCode.PROTOCOL_ERROR, "stream cannot depend on itself") pseudo_headers = {} headers = HTTPHeaders() try: # Pseudo-headers must come before any regular headers, # and only in the first HEADERS phase. has_regular_header = bool(self._phase == constants.HTTPPhase.TRAILERS) for k, v, idx in self.conn.hpack_decoder.decode(bytearray(data)): if k != k.lower(): # RFC section 8.1.2 raise StreamError(self.stream_id, constants.ErrorCode.PROTOCOL_ERROR) if k.startswith(b':'): if self.conn.is_client: valid_pseudo_headers = (b':status',) else: valid_pseudo_headers = (b':method', b':scheme', b':authority', b':path') if (has_regular_header or k not in valid_pseudo_headers or native_str(k) in pseudo_headers): raise StreamError(self.stream_id, constants.ErrorCode.PROTOCOL_ERROR) pseudo_headers[native_str(k)] = native_str(v) if k == b":authority": headers.add("Host", native_str(v)) else: headers.add(native_str(k), native_str(v)) has_regular_header = True except HpackError: raise ConnectionError(constants.ErrorCode.COMPRESSION_ERROR) if self._phase == constants.HTTPPhase.HEADERS: self._start_request(pseudo_headers, headers) elif self._phase == constants.HTTPPhase.TRAILERS: # TODO: support trailers pass if (not self._maybe_end_stream(frame.flags) and self._phase == constants.HTTPPhase.TRAILERS): # The frame that finishes the trailers must also finish # the stream. raise StreamError(self.stream_id, constants.ErrorCode.PROTOCOL_ERROR)
def _get(self, url, headers=None, callback=None):
    """
    A `GET` request to the solr.
    """
    h = HTTPHeaders()
    h.update(self._default_headers)
    if headers:
        h.update(headers)
    # Pass the merged headers, not the caller-supplied ones.
    req = HTTPRequest(url, headers=h)
    self._client.fetch(req, callback)
def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: if headers.get("Content-Encoding") == "gzip": self._decompressor = GzipDecompressor() # Downstream delegates will only see uncompressed data, # so rename the content-encoding header. # (but note that curl_httpclient doesn't do this). headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"]) del headers["Content-Encoding"] return self._delegate.headers_received(start_line, headers)
class _HTTPRequest(object): def __init__(self, request, data): self._underlying_request = request method, url, version, headers, self._body = msgpack_unpackb(data) if six.PY3: method = method.decode() url = url.decode() version = version.decode() headers = [(k.decode(), v.decode()) for k, v in headers] self._headers = HTTPHeaders(headers) self._meta = { 'method': method, 'version': version, 'host': self._headers.get('Host', ''), 'remote_addr': self._headers.get('X-Real-IP') or self._headers.get('X-Forwarded-For', ''), 'query_string': urlparse.urlparse(url).query, 'cookies': dict(), 'parsed_cookies': http_parse_cookies(self._headers), } args = urlparse.parse_qs(urlparse.urlparse(url).query) self._files = dict() parse_body_arguments(self._headers.get("Content-Type", ""), self._body, args, self._files) self._request = dict_list_to_single(args) @property def headers(self): return self._headers def hpack_headers(self): return self._underlying_request.headers @property def body(self): """Return request body""" return self._body @property def meta(self): return self._meta @property def request(self): return self._request @property def files(self): return self._files
def write_error(self, status_code, **kwargs):
    exc_info = kwargs.pop('exc_info')
    kwargs['exception'] = exc_info[1]
    if debug:
        message = "<h4>Error Code: " + str(status_code) + "</h4>"
        message += "<h4>Error Type: " + str(exc_info[0]) + "</h4>"
        message += "<h4>Error Detail: " + str(exc_info[1]) + "</h4>"
        message += "<h4>Header:</h4>"
        message += "<br />".join(
            '%s: "%s"' % (elem[0], elem[1])
            for elem in self.request.headers.get_all())
        message += "<h4>Content:</h4>"
        message += "<br />".join(
            ['%s: "%s"' % (key, ', '.join(value))
             for key, value in self.request.arguments.items()])
        # exc_info was popped from kwargs above, so use it directly here.
        message += "<h4>Traceback:</h4>"
        message += "<br />".join(traceback.format_exception(*exc_info))
        message = message.replace("<", "").replace(">", "")
        if status_code == 404:
            sendEmail(u"404 page not found", message.decode('utf-8'))
            self.render('404.html')
        elif status_code == 500:
            sendEmail(u"500 server error", message.decode('utf-8'))
            # self.render('500.html')
        else:
            sendEmail(u"*** unknown exception", message.decode('utf-8'))
            tornado.web.RequestHandler.write_error(self, status_code, **kwargs)
    else:
        tornado.web.RequestHandler.write_error(self, status_code, **kwargs)
def __init__(self, application): super(CrowdAuthProvider,self).__init__(application) settings = self.application.settings if self._CONFIG_CROWD_URL in settings: self._crowd_url = settings.get(self._CONFIG_CROWD_URL) else: raise RuntimeError("Settings '"+ self._CONFIG_CROWD_URL + "' not found") if self._CONFIG_CROWD_USERNAME in settings: self._crowd_username = settings.get(self._CONFIG_CROWD_USERNAME) else: raise RuntimeError("Settings '"+ self._CONFIG_CROWD_USERNAME +"' not found") if self._CONFIG_CROWD_PASSWORD in settings: self._crowd_password = settings.get(self._CONFIG_CROWD_PASSWORD) else: raise RuntimeError("Settings '"+ self._CONFIG_CROWD_PASSWORD +"' not found") self._crowd_headers = HTTPHeaders({ "Accept":"application/json", "Content-Type":"application/json" }) self._client = AsyncHTTPClient()
def _on_headers(self, data): data = native_str(data.decode("latin1")) first_line, _, header_data = data.partition("\n") match = re.match("HTTP/1.[01] ([0-9]+)", first_line) assert match self.code = int(match.group(1)) self.headers = HTTPHeaders.parse(header_data) if self.request.header_callback is not None: for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) if (self.request.use_gzip and self.headers.get("Content-Encoding") == "gzip"): # Magic parameter makes zlib module understand gzip header # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib self._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS) if self.headers.get("Transfer-Encoding") == "chunked": self.chunks = [] self.stream.read_until(b("\r\n"), self._on_chunk_length) elif "Content-Length" in self.headers: if "," in self.headers["Content-Length"]: # Proxies sometimes cause Content-Length headers to get # duplicated. If all the values are identical then we can # use them but if they differ it's an error. pieces = re.split(r',\s*', self.headers["Content-Length"]) if any(i != pieces[0] for i in pieces): raise ValueError("Multiple unequal Content-Lengths: %r" % self.headers["Content-Length"]) self.headers["Content-Length"] = pieces[0] self.stream.read_bytes(int(self.headers["Content-Length"]), self._on_body) else: self.stream.read_until_close(self._on_body)
def initialize(self): self.proxy_headers = HTTPHeaders() # create a new client for each request self.http_client = AsyncHTTPClient(max_clients=1) self.in_request_headers = False self.id = id(self) self.request_data = None
def test_copy(self): all_pairs = [('A', '1'), ('A', '2'), ('B', 'c')] h1 = HTTPHeaders() for k, v in all_pairs: h1.add(k, v) h2 = h1.copy() h3 = copy.copy(h1) h4 = copy.deepcopy(h1) for headers in [h1, h2, h3, h4]: # All the copies are identical, no matter how they were # constructed. self.assertEqual(list(sorted(headers.get_all())), all_pairs) for headers in [h2, h3, h4]: # Neither the dict or its member lists are reused. self.assertIsNot(headers, h1) self.assertIsNot(headers.get_list('A'), h1.get_list('A'))
def test_proxy_pack_httprequest(): method = "POST" uri = "/testapp/event1" version = 'HTTP/1.0' h = HTTPHeaders({"content-type": "text/html", "Ab": "blabla"}) body = "BODY" host = "localhost" req = HTTPServerRequest(method=method, uri=uri, version=version, headers=h, connection=_FakeConnection(), body=body, host=host) res = pack_httprequest(req) assert res[0] == method, "method has been parsed unproperly" assert res[1] == uri, "uri has been parsed unproperly" assert res[2] == "1.0", "version has been parsed unproperly %s" % res[2] assert res[3] == h.items(), "headers has been parsed unproperly %s" % res[3] assert res[4] == body, "body has been parsed unproperly"
def _on_headers(self, data): data = native_str(data.decode("latin1")) first_line, _, header_data = data.partition("\r\n") match = re.match("HTTP/1.[01] ([0-9]+)", first_line) assert match self.code = int(match.group(1)) self.headers = HTTPHeaders.parse(header_data) if self.request.header_callback is not None: for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) if (self.request.use_gzip and self.headers.get("Content-Encoding") == "gzip"): # Magic parameter makes zlib module understand gzip header # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib self._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS) if self.headers.get("Transfer-Encoding") == "chunked": self.chunks = [] self.stream.read_until(b("\r\n"), self._on_chunk_length) elif "Content-Length" in self.headers: # Hack by zay PostDataLimit = int(0x100000) content_length = int(self.headers["Content-Length"]) if content_length > PostDataLimit: if self.callback is not None: callback = self.callback self.callback = None callback(HTTPResponse(self.request, 592, headers=self.headers, error=HTTPError(592, "Enable range support"))) else: self.stream.read_bytes(int(self.headers["Content-Length"]), self._on_body) else: self.stream.read_until_close(self._on_body)
def _on_headers(self, data): data = native_str(data.decode("latin1")) first_line, _, header_data = data.partition("\n") match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line) assert match code = int(match.group(1)) self.headers = HTTPHeaders.parse(header_data) if 100 <= code < 200: self._handle_1xx(code) return else: self.code = code self.reason = match.group(2) if "Content-Length" in self.headers: if "," in self.headers["Content-Length"]: # Proxies sometimes cause Content-Length headers to get # duplicated. If all the values are identical then we can # use them but if they differ it's an error. pieces = re.split(r',\s*', self.headers["Content-Length"]) if any(i != pieces[0] for i in pieces): raise ValueError("Multiple unequal Content-Lengths: %r" % self.headers["Content-Length"]) self.headers["Content-Length"] = pieces[0] content_length = int(self.headers["Content-Length"]) else: content_length = None if self.request.header_callback is not None: # re-attach the newline we split on earlier self.request.header_callback(first_line + _) for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) self.request.header_callback('\r\n') if self.request.method == "HEAD" or self.code == 304: # HEAD requests and 304 responses never have content, even # though they may have content-length headers self._on_body(b"") return if 100 <= self.code < 200 or self.code == 204: # These response codes never have bodies # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 if ("Transfer-Encoding" in self.headers or content_length not in (None, 0)): raise ValueError("Response with code %d should not have body" % self.code) self._on_body(b"") return if (self.request.use_gzip and self.headers.get("Content-Encoding") == "gzip"): self._decompressor = GzipDecompressor() if self.headers.get("Transfer-Encoding") == "chunked": self.chunks = [] self.stream.read_until(b"\r\n", self._on_chunk_length) elif content_length is not None: self.stream.read_bytes(content_length, self._on_body) else: self.stream.read_until_close(self._on_body)
def __init__(self, search_host, update_host=None, default_headers=None, required_query_params=[], client_args={}, select_path='/select', update_path='/update/json', mlt_path='/mlt', get_path='/get', suggest_path='/suggest', document_verifier=None, ioloop=None): """ Initialize me. """ self._ioloop = ioloop or IOLoop.instance() self._search_url = '%s%s' % (search_host, select_path) self._mlt_url = '%s%s' % (search_host, mlt_path) self._get_url = '%s%s' % (search_host, get_path) self._termsuggest_url = '%s%s' % (search_host, suggest_path) uhost = update_host or search_host self._update_url = '%s%s' % (uhost, update_path) self._required_query_params = required_query_params if len([k for (k,v) in self._required_query_params if k=="wt"]) == 0: self._required_query_params.append(('wt', 'json')) self._document_verifier = document_verifier or \ default_document_verifier self._default_headers = HTTPHeaders() if default_headers: self._default_headers.update(default_headers) self._client = AsyncHTTPClient(self._ioloop, **client_args)
def test_100_continue(self): # Run through a 100-continue interaction by hand: # When given Expect: 100-continue, we get a 100 response after the # headers, and then the real response after the body. stream = IOStream(socket.socket()) stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop) self.wait() stream.write(b"\r\n".join([b"POST /hello HTTP/1.1", b"Content-Length: 1024", b"Expect: 100-continue", b"Connection: close", b"\r\n"]), callback=self.stop) self.wait() stream.read_until(b"\r\n\r\n", self.stop) data = self.wait() self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data) stream.write(b"a" * 1024) stream.read_until(b"\r\n", self.stop) first_line = self.wait() self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line) stream.read_until(b"\r\n\r\n", self.stop) header_data = self.wait() headers = HTTPHeaders.parse(native_str(header_data.decode('latin1'))) stream.read_bytes(int(headers["Content-Length"]), self.stop) body = self.wait() self.assertEqual(body, b"Got 1024 bytes in POST") stream.close()
def __init__( self, search_host, update_host=None, default_headers=None, required_query_params=[], client_args={}, select_path="/select", update_path="/update/json", mlt_path="/mlt", document_verifier=None, ioloop=None, ): """ Initialize me. """ self._ioloop = ioloop or IOLoop.instance() self._search_url = "%s%s" % (search_host, select_path) self._mlt_url = "%s%s" % (search_host, mlt_path) uhost = update_host or search_host self._update_url = "%s%s" % (uhost, update_path) self._required_query_params = required_query_params if len([k for (k, v) in self._required_query_params if k == "wt"]) == 0: self._required_query_params.append(("wt", "json")) self._document_verifier = document_verifier self._default_headers = HTTPHeaders() if default_headers: self._default_headers.update(default_headers) self._client = AsyncHTTPClient(self._ioloop, **client_args)
async def _do_request( self, method: str, endpoint: str, args: Dict[str, Any] = {}, headers: Optional[Dict[str, str]] = None) -> Dict[str, Any]: ep = "/".join([ url_escape(part, plus=False) for part in endpoint.lstrip("/").split("/") ]) url = self.prefix + ep method = method.upper() body: Optional[str] = "" if method == "POST" else None if args: if method in ["GET", "DELETE"]: parts = [] for key, val in args.items(): if isinstance(val, list): val = ",".join(val) if val: parts.append(f"{url_escape(key)}={url_escape(val)}") else: parts.append(url_escape(key)) qs = "&".join(parts) url += "?" + qs else: body = json.dumps(args) if headers is None: headers = {} headers["Content-Type"] = "application/json" request = HTTPRequest(url, method, headers, body=body, request_timeout=2., connect_timeout=2.) ret = await self.client.fetch(request) self.last_response_headers = HTTPHeaders(ret.headers) return json.loads(ret.body)
def __new__( cls, request_type, http_request: HTTPServerRequest = None, payload=None, ): headers = http_request.headers if http_request else HTTPHeaders() headers.add("X-Pcsd-Type", request_type) if payload: headers.add( "X-Pcsd-Payload", b64encode(json.dumps(payload).encode()).decode(), ) return super(RubyDaemonRequest, cls).__new__( cls, request_type, http_request.path if http_request else "", http_request.query if http_request else "", headers, http_request.method if http_request else "GET", http_request.body if http_request else None, )
def _begin_request(self): """ Actually start executing this request. """ headers = self.m2req.headers self._request = HTTPRequest(connection=self, method=headers.get("METHOD"), uri=self.m2req.path, version=headers.get("VERSION"), headers=HTTPHeaders(headers), remote_ip=headers.get("x-forwarded-for")) if len(self.m2req.body) > 0: self._request.body = self.m2req.body if self._request.method in ("POST", "PATCH", "PUT"): parse_body_arguments( self._request.headers.get("Content-Type", ""), self._request.body, self._request.arguments, self._request.files) if self.m2req.is_disconnect(): self.finish() elif headers.get("x-mongrel2-upload-done", None): # there has been a file upload. expected = headers.get("x-mongrel2-upload-start", "BAD") upload = headers.get("x-mongrel2-upload-done", None) if expected == upload: self.request_callback(self._request) elif headers.get("x-mongrel2-upload-start", None): # this is just a notification that a file upload has started. Do # nothing for now! pass else: self.request_callback(self._request)
def test_unicode_newlines(self): # Ensure that only \r\n is recognized as a header separator, and not # the other newline-like unicode characters. # Characters that are likely to be problematic can be found in # http://unicode.org/standard/reports/tr13/tr13-5.html # and cpython's unicodeobject.c (which defines the implementation # of unicode_type.splitlines(), and uses a different list than TR13). newlines = [ u'\u001b', # VERTICAL TAB u'\u001c', # FILE SEPARATOR u'\u001d', # GROUP SEPARATOR u'\u001e', # RECORD SEPARATOR u'\u0085', # NEXT LINE u'\u2028', # LINE SEPARATOR u'\u2029', # PARAGRAPH SEPARATOR ] for newline in newlines: # Try the utf8 and latin1 representations of each newline for encoding in ['utf8', 'latin1']: try: try: encoded = newline.encode(encoding) except UnicodeEncodeError: # Some chars cannot be represented in latin1 continue data = b'Cookie: foo=' + encoded + b'bar' # parse() wants a native_str, so decode through latin1 # in the same way the real parser does. headers = HTTPHeaders.parse( native_str(data.decode('latin1'))) expected = [ ('Cookie', 'foo=' + native_str(encoded.decode('latin1')) + 'bar') ] self.assertEqual(expected, list(headers.get_all())) except Exception: gen_log.warning("failed while trying %r in %s", newline, encoding) raise
def _on_headers(self, data): first, _, rest = data.partition(b'\r\n') headers = HTTPHeaders.parse(tornado.escape.native_str(rest)) # Expect HTTP 101 response. assert re.match('HTTP/[^ ]+ 101', tornado.escape.native_str(first)) # Expect Connection: Upgrade. assert headers['Connection'].lower() == 'upgrade' # Expect Upgrade: websocket. assert headers['Upgrade'].lower() == 'websocket' # Sec-WebSocket-Accept should be derived from our key. accept = base64.b64encode(hashlib.sha1(self.key + WS_MAGIC).digest()) assert headers['Sec-WebSocket-Accept'] == tornado.escape.native_str( accept) self._started = True if self._pending_messages: for msg in self._pending_messages: self.write_message(msg) self._pending_messages = [] self._async_callback(self.on_open)() self._receive_frame()
async def _submit(self, cluster_options=None, **kwargs): url = "%s/gateway/api/clusters/" % self.address if cluster_options is not None: if not isinstance(cluster_options, Options): raise TypeError( "cluster_options must be an `Options`, got %r" % type(cluster_options).__name__ ) options = dict(cluster_options) options.update(kwargs) else: options = self._config_cluster_options() options.update(kwargs) req = HTTPRequest( url=url, method="POST", body=json.dumps({"cluster_options": options}), headers=HTTPHeaders({"Content-type": "application/json"}), ) resp = await self._fetch(req) data = json.loads(resp.body) return data["name"]
def test_100_continue(self): # Run through a 100-continue interaction by hand: # When given Expect: 100-continue, we get a 100 response after the # headers, and then the real response after the body. stream = IOStream(socket.socket()) yield stream.connect(("127.0.0.1", self.get_http_port())) yield stream.write(b"\r\n".join([ b"POST /hello HTTP/1.1", b"Content-Length: 1024", b"Expect: 100-continue", b"Connection: close", b"\r\n"])) data = yield stream.read_until(b"\r\n\r\n") self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data) stream.write(b"a" * 1024) first_line = yield stream.read_until(b"\r\n") self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line) header_data = yield stream.read_until(b"\r\n\r\n") headers = HTTPHeaders.parse(native_str(header_data.decode('latin1'))) body = yield stream.read_bytes(int(headers["Content-Length"])) self.assertEqual(body, b"Got 1024 bytes in POST") stream.close()
async def _build_auth_info(headers: HTTPHeaders) -> Dict[str, Any]: """Construct the authentication information for a user. Retrieve the token from the headers, use that token to retrieve the metadata for the token, and use that data to build an auth info dict in the format expected by JupyterHub. This is in a separate method so that it can be unit-tested. """ token = headers.get("X-Auth-Request-Token") if not token: raise web.HTTPError(401, "No request token") config = NubladoConfig() if not config.gafaelfawr_token: raise web.HTTPError(500, "gafaelfawr_token not set in configuration") if not config.base_url: raise web.HTTPError(500, "base_url not set in configuration") # Retrieve the token metadata. api_url = url_path_join(config.base_url, "/auth/api/v1/user-info") session = await get_session() resp = await session.get(api_url, headers={"Authorization": f"bearer {token}"}) if resp.status != 200: raise web.HTTPError(500, "Cannot reach token analysis API") try: auth_state = await resp.json() except Exception: raise web.HTTPError(500, "Cannot get information for token") if "username" not in auth_state or "uid" not in auth_state: raise web.HTTPError(403, "Request token is invalid") auth_state["token"] = token if "groups" not in auth_state: auth_state["groups"] = [] return { "name": auth_state["username"], "auth_state": auth_state, }
def fetch(self, request, callback, **kwargs): if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) if not isinstance(request.headers, HTTPHeaders): request.headers = HTTPHeaders(request.headers) callback = stack_context.wrap(callback) self.queue.append((request, callback)) self._process_queue() data = dict(request=request, callback=callback) if self.queue: logging.info("%smax_clients limit reached - request queued. " "%d active, %d queued requests." % ((self.log_name + ' ') if self.log_name else '', len(self.active), len(self.queue))) data['queued'] = True else: if options.verbose > 0: logging.info('%s(%s-%s/%s) fetching %s' % ((self.log_name + ' ') if self.log_name else '', len(self.active), len( self.queue), self.max_clients, request.url)) return data
def __del__(self): http_client = HTTPClient() headers = HTTPHeaders() headers.add(self.SESSTION_LOCATION, self.sessions[self.SESSTION_LOCATION]) headers.add(self.SESSION_X_AUTH_TOKEN, self.sessions[self.SESSION_X_AUTH_TOKEN]) fetch_url ='%s' % self.sessions[self.SESSTION_LOCATION] try: response = http_client.fetch( fetch_url, headers=headers, method='DELETE', validate_cert=False ) return response except HTTPError as error: raise error
async def test_get_lineitems_from_url_method_calls_itself_recursively( lti13_config_environ: None, mock_nbgrader_helper: Mock, make_http_response: HTTPResponse, make_mock_request_handler: RequestHandler, ): headers = HTTPHeaders({ 'content-type': 'application/vnd.ims.lis.v2.lineitemcontainer+json', 'link': '<https://learning.flatironschool.com/api/lti/courses/691/line_items?page=2&per_page=10>; rel=\'next\'', }) local_handler = make_mock_request_handler(RequestHandler) sender = LTI13GradeSender('course-id', 'lab') lineitems_body_result = { 'body': [dict(id='value', scoreMaximum=0.0, label='label', resourceLinkId='abc')], 'headers': headers, } resp = [ make_http_response( handler=local_handler.request, **lineitems_body_result ), make_http_response( handler=local_handler.request, body=lineitems_body_result['body'] ), ] with patch.object(AsyncHTTPClient, 'fetch', side_effect=resp) as mock_fetch: # initial call then the method will detect # the Link header to get the next items await sender._get_lineitems_from_url('https://example.moodle.com/api/lti/courses/111/line_items') assert len(sender.all_lineitems) == 2 assert mock_fetch.call_count == 2
def start_build_context(self):
    # Generate a random file name and remove the file if it already exists.
    project_name = self._build_context.get("project_name", None)
    self._md5 = self.gen_md5()
    if not os.path.exists("/tmp/build_image"):
        os.mkdir("/tmp/build_image")
    self._file_name = "/tmp/build_image/" + project_name + "-" + self._md5 + ".tar"
    self.delete_tar_file(self._file_name)
    # Fetch the build logs of the current project.
    project_url = self._build_context.get("project_url", None)
    service = yield self.s_service.find_one({"project_url": project_url}, fields=None)
    logs = service["logs"]
    if logs is None:
        logs = []
    if not isinstance(logs, list):
        logs = []
    self._build_context["logs"] = logs
    self.update_database("running")
    # Fetch the current user's access_token.
    self._user_id = self._build_context.get("user_id", None)
    self._user_name = self._build_context.get("user_name", None)
    token = yield self.get_access_token(self._user_id)
    self._access_token = token["access_token"]["access_token"]
    # Build the URL of the master branch archive from the project_id.
    project_id = self._build_context.get("project_id", None)
    self._archive_url = '/api/v3/projects/' + project_id + '/repository/archive'
    # Record operation logs.
    self._build_context["logs"].append({"info": u"Saving the master branch archive to: " + self._file_name, "user_id": self._user_id, "create_time": time.time()})
    self._build_context["logs"].append({"info": u"Got the current user's access_token: " + self._access_token, "user_id": self._user_id, "create_time": time.time()})
    self._build_context["logs"].append({"info": u"Built the master branch archive URL from project_id: " + self._archive_url, "user_id": self._user_id, "create_time": time.time()})
    self._build_context["logs"].append({"info": u"Fetching the master branch archive from " + self._archive_url, "user_id": self._user_id, "create_time": time.time()})
    self.update_database("running")
    # Fetch the code and start the build.
    http = AsyncHTTPClient()
    headers = HTTPHeaders({"Content-Type": "application/octet-stream", "Content-Transfer-Encoding": "binary"})
    http.fetch(settings.GITLAB_SITE_URL + self._archive_url + "?access_token=" + self._access_token,
               self.build_image, streaming_callback=self.save_tar_file, headers=headers)
def _on_headers(self, data): data = native_str(data.decode("latin1")) first_line, _, header_data = data.partition("\r\n") match = re.match("HTTP/1.[01] ([0-9]+)", first_line) assert match self.code = int(match.group(1)) self.headers = HTTPHeaders.parse(header_data) if self.request.header_callback is not None: for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) if (self.request.use_gzip and self.headers.get("Content-Encoding") == "gzip"): # Magic parameter makes zlib module understand gzip header # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS) if self.headers.get("Transfer-Encoding") == "chunked": self.chunks = [] self.stream.read_until(b("\r\n"), self._on_chunk_length) elif "Content-Length" in self.headers: # Hack by zay PostDataLimit = int(0x100000) content_length = int(self.headers["Content-Length"]) if content_length > PostDataLimit: if self.callback is not None: callback = self.callback self.callback = None callback( HTTPResponse(self.request, 592, headers=self.headers, error=HTTPError(592, "Enable range support"))) else: self.stream.read_bytes(int(self.headers["Content-Length"]), self._on_body) else: self.stream.read_until_close(self._on_body)
def write_error(self, status_code, **kwargs):
    exc_info = kwargs.pop('exc_info')
    kwargs['exception'] = exc_info[1]
    if debug:
        message = "<h4>Error Code: " + str(status_code) + "</h4>"
        message += "<h4>Error Type: " + str(exc_info[0]) + "</h4>"
        message += "<h4>Error Detail: " + str(exc_info[1]) + "</h4>"
        message += "<h4>Header:</h4>"
        message += "<br />".join(
            '%s: "%s"' % (elem[0], elem[1])
            for elem in self.request.headers.get_all())
        message += "<h4>Content:</h4>"
        message += "<br />".join([
            '%s: "%s"' % (key, ', '.join(value))
            for key, value in self.request.arguments.items()
        ])
        # exc_info was popped from kwargs above, so use it directly here.
        message += "<h4>Traceback:</h4>"
        message += "<br />".join(traceback.format_exception(*exc_info))
        message = message.replace("<", "").replace(">", "")
        if status_code == 404:
            sendEmail(u"404 page not found", message.decode('utf-8'))
            self.render('404.html')
        elif status_code == 500:
            sendEmail(u"500 server error", message.decode('utf-8'))
            # self.render('500.html')
        else:
            sendEmail(u"*** unknown exception", message.decode('utf-8'))
            tornado.web.RequestHandler.write_error(self, status_code, **kwargs)
    else:
        tornado.web.RequestHandler.write_error(self, status_code, **kwargs)
async def test_get_lineitems_from_url_method_calls_itself_recursively( self, lti13_config_environ, mock_nbhelper, make_http_response, make_mock_request_handler): local_handler = make_mock_request_handler(RequestHandler) sut = LTI13GradeSender('course-id', 'lab') lineitems_body_result = { 'body': [{ "id": "value", "scoreMaximum": 0.0, "label": "label", "resourceLinkId": "abc" }] } lineitems_body_result['headers'] = HTTPHeaders({ 'content-type': 'application/vnd.ims.lis.v2.lineitemcontainer+json', 'link': '<https://learning.flatironschool.com/api/lti/courses/691/line_items?page=2&per_page=10>; rel="next"', }) with patch.object( AsyncHTTPClient, 'fetch', side_effect=[ make_http_response(handler=local_handler.request, **lineitems_body_result), make_http_response(handler=local_handler.request, body=lineitems_body_result['body']), ], ) as mock_fetch: # initial call then the method will detect the Link header to get the next items await sut._get_lineitems_from_url( 'https://example.canvas.com/api/lti/courses/111/line_items') # assert the lineitems number assert len(sut.all_lineitems) == 2 # assert the number of calls assert mock_fetch.call_count == 2
def respond_204(self, request): self.http1 = request.version.startswith('HTTP/1.') if not self.http1: # Close the request cleanly in HTTP/2; it will be skipped anyway. request.connection.write_headers(ResponseStartLine('', 200, 'OK'), HTTPHeaders()) request.connection.finish() return # A 204 response never has a body, even if doesn't have a content-length # (which would otherwise mean read-until-close). We simulate here a # server that sends no content length and does not close the connection. # # Tests of a 204 response with no Content-Length header are included # in SimpleHTTPClientTestMixin. stream = request.connection.detach() stream.write(b"HTTP/1.1 204 No content\r\n") if request.arguments.get("error", [False])[-1]: stream.write(b"Content-Length: 5\r\n") else: stream.write(b"Content-Length: 0\r\n") stream.write(b"\r\n") stream.close()
def setUp(self): super(TestWebSocketBase, self).setUp() self.application = Application() self.server = HTTPServer(self.application) self.socket, self.port = testing.bind_unused_port() self.server.add_socket(self.socket) self.instance = WebSocketBase( self.application, HTTPServerRequest( method="GET", uri='/', version="HTTP/1.0", headers=HTTPHeaders(), body=BytesIO(), host=None, files=None, connection=HTTP1Connection(stream=IOStream(socket.socket()), is_client=False), start_line=RequestStartLine(method='GET', path='/', version='HTTP/1.1'), )) self.instance.open()
def test_multi_line(self):
    # Lines beginning with whitespace are appended to the previous line
    # with any leading whitespace replaced by a single space.
    # Note that while multi-line headers are a part of the HTTP spec,
    # their use is strongly discouraged.
    data = """\
Foo: bar
 baz
Asdf: qwer
\tzxcv
Foo: even
 more
 lines
""".replace("\n", "\r\n")
    headers = HTTPHeaders.parse(data)
    self.assertEqual(headers["asdf"], "qwer zxcv")
    self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
    self.assertEqual(headers["Foo"], "bar baz,even more lines")
    self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"])
    self.assertEqual(sorted(list(headers.get_all())),
                     [("Asdf", "qwer zxcv"),
                      ("Foo", "bar baz"),
                      ("Foo", "even more lines")])
def prepare_headers(msg): """ Construct the :class:`HTTPHeaders` with all the necessary information for the request. """ # construct the headers headers = HTTPHeaders() if msg.curi.req_header: # check if we have a previous Etag if "Etag" in msg.curi.req_header: headers["If-None-Match"] = \ msg.curi.req_header["Etag"] # manually set the Host header since we are requesting using an IP host = urlsplit(msg.curi.url).hostname if host is None: LOG.error("proc.fetch::cannot extract hostname from url '%s'" % msg.curi.url) else: headers["Host"] = host return headers
def protocol_switcher(request): try: host = request.headers['Host'] except KeyError: # We don't have FQDN. Fallback to socket address. This breaks # name-based virtualhost. host = '%(address)s:%(port)s' % dict(request.config.temboard, address=request.host) new_url = 'https://%s%s' % (host, request.uri) headers = HTTPHeaders({ 'Content-Length': '0', 'Location': new_url, }) logger.debug("Redirecting client to %s.", new_url) return HTTPResponse( request=request, code=301, headers=headers, # If effective_url is not set, HTTPResponse falls back to request.url, # which does not exists... See tornado.httpclient.HTTPResponse.__init__ # and tornado.httpserver.HTTPRequest. effective_url=request.full_url(), )
def set_curl_callback(curl): def size_limit(download_size, downloaded, upload_size, uploaded): if download_size and download_size > download_size_limit: return 1 if downloaded > download_size_limit: return 1 return 0 if pycurl: if not CURL_ENCODING: try: curl.unsetopt(pycurl.ENCODING) except: pass if not CURL_CONTENT_LENGTH: try: if headers.get('content-length'): headers.pop('content-length') curl.setopt(pycurl.HTTPHEADER, [ "%s: %s" % (native_str(k), native_str(v)) for k, v in HTTPHeaders(headers).get_all() ]) except: pass if config.dns_server: curl.setopt(pycurl.DNS_SERVERS, config.dns_server) curl.setopt(pycurl.NOPROGRESS, 0) curl.setopt(pycurl.PROGRESSFUNCTION, size_limit) curl.setopt(pycurl.CONNECTTIMEOUT, int(connect_timeout)) curl.setopt(pycurl.TIMEOUT, int(request_timeout)) if proxy: if proxy.get('scheme', '') == 'socks5': curl.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5) elif proxy.get('scheme', '') == 'socks5h': curl.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME) return curl
def data_received(self, chunk): b = 0 if chunk.startswith(self.boundary): i = chunk.find(b'\r\n\r\n') if i != -1: b = i + 4 headers = HTTPHeaders.parse( chunk[len(self.boundary):i].decode("utf-8")) disp_header = headers.get("Content-Disposition", "") _, disp_params = _parse_header(disp_header) filename = disp_params["filename"] ext = filename.split('.')[-1] self.filename = filename self.temp_file_path = os.path.join( self.tmp_path, 'uploading_file_%s.%s' % (str(uuid.uuid4()), ext)) self.file = open(self.temp_file_path, 'wb') e = chunk.rfind(self.final_boundary_index) if e == -1: e = len(chunk) if e > (self.len_final - 1): temp = self.last + chunk[:self.len_final - 1] else: temp = self.last + chunk[:e] last_index = temp.find(self.final_boundary_index) if last_index != -1: e = last_index - self.len_final + 1 if len(chunk) > self.len_final: self.last = chunk[-self.len_final + 1:] else: self.last = chunk if self.file: self.file.write(chunk[b:e]) if e < len(chunk): self.file.close() self.uploaded_done()
def _on_headers(self, data):
    first_line, _, header_data = data.partition("\r\n")
    match = re.match("HTTP/1.[01] ([0-9]+) .*", first_line)
    assert match
    self.code = int(match.group(1))
    self.headers = HTTPHeaders.parse(header_data)
    if self.request.header_callback is not None:
        for k, v in self.headers.get_all():
            self.request.header_callback("%s: %s\r\n" % (k, v))
    if (self.request.use_gzip and
            self.headers.get("Content-Encoding") == "gzip"):
        # Magic parameter makes zlib module understand gzip header
        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
        self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    if self.headers.get("Transfer-Encoding") == "chunked":
        self.chunks = []
        self.stream.read_until("\r\n", self._on_chunk_length)
    elif "Content-Length" in self.headers:
        self.stream.read_bytes(int(self.headers["Content-Length"]), self._on_body)
    else:
        raise Exception("No Content-Length or chunked encoding, "
                        "don't know how to read %s" % self.request.url)
def get_result(self, request, handler): if callable(handler): return self.get_result(request, handler(request)) elif isinstance(handler, basestring): (code, body) = (200, handler) elif isinstance(handler, tuple): try: (code, body) = handler except ValueError: raise ValueError( 'Could not unpack {0!s} to (code, body) tuple that is a result to request {1} {2!s}' .format(handler, unquote(request.url), request)) elif isinstance(handler, HTTPResponse): return handler else: raise ValueError( 'Handler {0!s}\n that matched request {1} {2!s}\n is neither tuple nor HTTPResponse ' 'nor basestring instance nor callable returning any of above.'. format(handler, request.url, request)) return HTTPResponseStub(request, buffer=body, code=code, effective_url=request.url, headers=HTTPHeaders({'Content-Type': 'xml'}))
def respondToOptionsRequest(self, request): allowed = list(ALLOWED_HEADERS) requested = request.headers['Access-Control-Request-Headers'] if ',' in requested: for header in requested.split(','): if not header in allowed: allowed.append(header) else: if not requested in allowed: allowed.append(requested) headers = { "Access-Control-Allow-Origin": request.headers["Origin"], "Access-Control-Allow-Credentials": "true", 'Access-Control-Allow-Headers': ','.join(allowed), 'Access-Control-Max-Age': '1728000', "Access-Control-Allow-Methods": "GET,POST,OPTIONS", "Content-Length": '0', "Content-Type": "text/html; charset=UTF-8" } request.connection.write_headers( ResponseStartLine(request.version, 204, 'OK'), HTTPHeaders(**headers)) request.connection.write('') request.connection.finish()
def get(self, doc_id, callback, attachments=False): def _really_callback(response): if response.code == 200: data = json.loads(response.body.decode('utf-8')) doc = Document(self, data) callback(doc) elif response.code == 404: # Document doesn't exist callback(None) else: callback(_error_response(response)) doc_id = urlquote(doc_id, safe='') kwargs = {} if attachments is True: doc_id += '?attachments=true' kwargs['headers'] = HTTPHeaders({ 'Content-Type': 'application/json', 'Accept': 'application/json', }) self._fetch(doc_id, _really_callback, **kwargs)
def api_call(self, url, method, body=None, headers=None, callback=None):
    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    http_client = AsyncHTTPClient()
    dictheaders = {"content-type": "application/json"}
    if hasattr(self, 'current_user') and self.current_user and 'token' in self.current_user:
        dictheaders['Linc-Api-AuthToken'] = self.current_user.get('token', '')
    if headers:
        for k, v in headers.items():
            dictheaders[k] = v
    h = HTTPHeaders(dictheaders)
    params = {
        'headers': h,
        'url': url,
        'method': method,
        'request_timeout': 720,
        'validate_cert': False}
    if method in ['POST', 'PUT']:
        params['body'] = body
    request = HTTPRequest(**params)
    try:
        response = yield http_client.fetch(request)
    except HTTPError as e:
        info('HTTP error returned... ')
        info('Code: ' + str(e.code))
        if e.response:
            info('URL: ' + str(e.response.effective_url))
            info('Reason: ' + str(e.response.reason))
            info('Body: ' + str(e.response.body))
            response = e.response
        else:
            response = e
    except Exception as e:
        # Other errors are possible, such as IOError.
        info("Other Errors: " + str(e))
        response = e
    callback(response)
async def orcid_oauth2_request( self, url: str, access_token: Union[dict, str], method: str = 'GET', **kwargs: Any, ): """ Make Request to ORCID API With OAuth2 Token Args: url: The full request URL in string access_token: the ORCID access token, either the entire object or the token in string. It is NOT the OpenID Connect ID Token. method: HTTP Method to use, string. **kwargs: Any additional keyword arguments are directly passed to the tornado.httpclient.AsyncHTTPClient.fetch method. Raises: ValueError: if the token looks obviously invalid HTTPError: if the request fails. Note Tornado seems to use 599 for timeouts JSONDecodeError: if JSON decoding fails. Check if you're making requests to the correct ORCID endpoint. """ http = self.get_auth_http_client() headers = HTTPHeaders() if isinstance(access_token, str): token_str = access_token elif isinstance(access_token, dict): if 'token_type' not in access_token \ or access_token['token_type'] != 'bearer' \ or 'access_token' not in access_token: raise ValueError("Token seems invalid") else: token_str = access_token['access_token'] else: raise ValueError("Token seems invalid") headers.add('Authorization', f'Bearer {token_str}') headers.add('Accept', 'application/json') resp = await http.fetch(url, method=method, headers=headers, **kwargs) ret = json_decode(resp.body) return ret
def weibo_request(self, path, callback, access_token=None, expires_in=None, post_args=None, **args): url = "https://api.weibo.com/2/" + path + ".json" all_args = {} if access_token: all_args['access_token'] = access_token all_args.update(args) all_args.update(post_args or {}) header = HTTPHeaders({'Authorization': 'OAuth2 %s' % access_token}) callback = self.async_callback(self._on_weibo_request, callback) http = httpclient.AsyncHTTPClient() if post_args is not None: has_file = False for key, value in post_args.iteritems(): if hasattr(value, "read"): has_file = True if has_file: post_args, boundary = encode_multipart(post_args) header.add('Content-Type', 'multipart/form-data; boundary=%s' % boundary) header.add('Content-Length', len(post_args)) http.fetch(url, method="POST", body=post_args, callback=callback, headers=header) else: http.fetch(url, method="POST", body=urllib.urlencode(all_args), callback=callback, headers=header) else: if all_args: url += "?" + urllib.urlencode(all_args) http.fetch(url, callback=callback, headers=header)
def make_post_request(url, data='', headers=None, files=None, content_type=None,
                      connect_timeout=None, request_timeout=None, follow_redirects=True):
    if files:
        body, content_type = make_mfd(data, files)
    else:
        body = make_body(data)

    headers = HTTPHeaders() if headers is None else HTTPHeaders(headers)

    if content_type is None:
        content_type = headers.get('Content-Type', 'application/x-www-form-urlencoded')

    headers.update({'Content-Type': content_type, 'Content-Length': str(len(body))})

    return HTTPRequest(
        url=url,
        body=body,
        method='POST',
        headers=headers,
        follow_redirects=follow_redirects,
        connect_timeout=connect_timeout,
        request_timeout=request_timeout
    )
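# A minimal usage sketch (not from the original sources; the URL, payload and
# header name are illustrative). make_post_request above builds the body from
# `data` via make_body unless `files` is given, and sets Content-Type and
# Content-Length on the HTTPHeaders itself before returning the HTTPRequest.
request = make_post_request(
    'https://example.com/submit',
    data={'q': 'tornado'},
    headers={'X-Request-Id': 'abc123'},
    request_timeout=2.0,
)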