def _consume_current_data(self, size):
    """Consume up to ``size`` bytes from the current field into ``_buffer``.

    When no field is active, the opening boundary line is written instead.
    When the active field becomes exhausted by this read, the closing
    CRLF + boundary is appended.

    :param size: number of bytes to read; ``None`` means "read everything"
    :returns: int -- number of bytes written to the buffer
    """
    written = 0
    # BUG FIX: file objects require an integer size; ``read(None)``
    # raises TypeError on plain file objects.  Normalize to -1
    # ("read all"), matching the sibling implementation of this method.
    if size is None:
        size = -1
    if self._current_data is None:
        # No active field yet: emit the boundary line.
        written = self._buffer.write(self.boundary)
        written += self._buffer.write('\r\n')
    elif super_len(self._current_data) > 0:
        # Copy a chunk of the active field into the buffer.
        written = self._buffer.write(self._current_data.read(size))
        if super_len(self._current_data) == 0:
            # Field drained on this read: terminate the part and start
            # the next boundary.
            written += self._buffer.write('\r\n{0}\r\n'.format(self.boundary))
    return written
def _consume_current_data(self, size):
    """Consume bytes from the current field (``_current_data``) into _buffer.

    If the field is finished after the operation, append the parts
    separator; for the very last field, emit the closing ``--`` marker
    so the body ends with ``--{boundary}--\\r\\n``.

    :returns: number of bytes written to _buffer
    """
    written = 0
    # File objects need an integer size; ``read(None)`` is not portable.
    if size is None:
        size = -1
    if self._current_data is not None:
        # Copy a chunk of the active field into the buffer.
        written = self._buffer.write(self._current_data.read(size))
        if super_len(self._current_data) == 0:
            # We've just finished the current field: emit the boundary
            # now and reset _current_data so the next call advances to
            # the next field.
            self._current_data = None
            written += self._buffer.write(
                encode_with('\r\n{0}'.format(self.boundary), self.encoding)
            )
            # If this is the last separator, add '--' before the CRLF to
            # produce the closing '--{boundary}--' line.
            if self._current_field_number == len(self.fields):
                self._buffer.write(encode_with('--', self.encoding))
            self._buffer.write(encode_with('\r\n', self.encoding))
    return written
def _load_bytes(self, size):
    """Top up ``_buffer`` until it holds ``size`` unread bytes or the
    underlying source is exhausted."""
    # Compact the buffer first so already-consumed bytes are discarded.
    self._buffer.smart_truncate()
    remaining = size - super_len(self._buffer)
    chunk = True
    while remaining > 0 and chunk:
        chunk = self._get_bytes()
        remaining -= self._buffer.append(chunk)
def _calculate_length(self):
    """Pre-compute the total body length from every (header, data) pair.

    The result is stored in ``self._len``.
    """
    boundary_len = len(self.boundary)  # length of '--{boundary}'
    # Each part contributes: boundary + header + body + 2 * '\r\n'.
    # NOTE(review): headers are measured in characters here; for
    # non-ASCII header values the encoded byte count may differ -- confirm.
    per_part = (
        boundary_len + len(header) + super_len(data) + 4
        for (header, data) in self._fields_list
    )
    # The trailing '--{boundary}--\r\n' adds boundary_len + 4 more bytes.
    self._len = sum(per_part) + boundary_len + 4
def test_super_len_io_streams(self):
    """super_len handles text, byte, and (py2) cStringIO streams alike."""
    from io import BytesIO
    from requests.utils import super_len

    # StringIO here is the module imported at file level (py2/py3 shim).
    cases = [
        (StringIO.StringIO(), 0),
        (StringIO.StringIO("with so much drama in the LBC"), 29),
        (BytesIO(), 0),
        (BytesIO(b"it's kinda hard bein' snoop d-o-double-g"), 40),
    ]
    for stream, expected in cases:
        assert super_len(stream) == expected

    # cStringIO exists only on Python 2; skip quietly elsewhere.
    try:
        import cStringIO
    except ImportError:
        pass
    else:
        assert super_len(cStringIO.StringIO("but some how, some way...")) == 25
def _calculate_length(self):
    """Pre-compute the total body length.

    Headers are measured *after* encoding so non-ASCII header values are
    counted in bytes rather than characters.
    """
    boundary_len = len(self.boundary)  # length of '--{boundary}'
    total = 0
    for (header, data) in self._fields_list:
        # boundary + encoded header + body + two CRLFs (4 bytes)
        total += boundary_len + len(encode_with(header, self.encoding))
        total += super_len(data) + 4
    # Closing '--{boundary}--\r\n'
    self._len = total + boundary_len + 4
def _consume_current_data(self, size):
    """Consume up to ``size`` bytes of the current field into ``_buffer``.

    With no active field, the boundary line is emitted instead.  Once the
    active field's cursor reaches its end, the part separator is appended
    unless the whole encoder is already ``finished``.

    :returns: int -- number of bytes written to the buffer
    """
    written = 0
    if self._current_data is None:
        # No active field: write the boundary line (as bytes).
        written = self._buffer.write(self.boundary.encode())
        written += self._buffer.write('\r\n'.encode())
    elif (self._current_data is not None and
            super_len(self._current_data) > 0):
        written = self._buffer.write(self._current_data.read(size))
    # Field fully consumed (length minus cursor position is zero):
    # terminate the part, but never after the encoder has finished.
    if (self._current_data is not None and
            super_len(self._current_data) - self._current_data.tell() == 0
            and not self.finished):
        written += self._buffer.write(
            '\r\n{0}\r\n'.format(self.boundary).encode()
        )
    return written
def test_super_len_handles_files_raising_weird_errors_in_tell(self, error):
    """If tell() raises errors, assume the cursor is at position zero."""
    class ExplodingTell(object):
        # __len__ says 5, but the cursor position cannot be determined.
        def __len__(self):
            return 5

        def tell(self):
            raise error()

    assert super_len(ExplodingTell()) == 0
def _calculate_length(self):
    """Compute the full body length from the parts.

    Stored in ``self._len`` and returned so ``__len__`` can stay lazy.

    :returns: int -- total length including the final boundary
    """
    boundary_len = len(self.boundary)  # length of '--{boundary}'
    parts_total = 0
    for part in self.parts:
        # boundary + part (headers + body) + two CRLFs (4 bytes)
        parts_total += boundary_len + super_len(part) + 4
    # Closing '--{boundary}--\r\n'
    self._len = parts_total + boundary_len + 4
    return self._len
def smart_truncate(self):
    """Discard already-consumed bytes once they outnumber the unread
    ones, compacting the buffer so memory does not grow without bound."""
    unread = super_len(self)
    consumed = self._get_end() - unread
    if consumed < unread:
        return
    # Over half the buffer is dead weight: keep only the unread tail
    # and rewind to the beginning.
    tail = self.read()
    self.seek(0, 0)
    self.truncate()
    self.write(tail)
    self.seek(0, 0)  # We want to be at the beginning
def test_super_len_tell_ioerror(self, error):
    """Ensure that if tell gives an IOError super_len doesn't fail"""
    class SeekOnlyFile(object):
        # No __len__ and a tell() that always blows up.
        def tell(self):
            raise error()

        def seek(self, offset, whence):
            pass

    assert super_len(SeekOnlyFile()) == 0
def test_stringio(self):
    """A SubsetIO over a BytesIO exposes exactly its 10-byte window."""
    backing = io.BytesIO(b"0123456789" * 1024)
    backing.seek(0)
    with SubsetIO(backing, 1, 10) as window:
        self.assertEqual(super_len(window), 10)
        self.assertEqual(window.read(), b"1234567890")
def bytes_left_to_write(self):
    """Determine if there are bytes left to write.

    :returns: bool -- ``True`` if there are bytes left to write,
        otherwise ``False``
    """
    # Unsent headers count toward the remaining byte total.
    pending = len(self.headers) if self.headers_unread else 0
    return pending + super_len(self.body) > 0
def _consume_current_data(self, size): written = 0 # File objects need an integer size if size is None: size = -1 if self._current_data is None: written = self._buffer.write(self.boundary.encode('utf-8')) written += self._buffer.write('\r\n'.encode('utf-8')) elif (self._current_data is not None and super_len(self._current_data) > 0): written = self._buffer.write(self._current_data.read(size)) if super_len(self._current_data) == 0 and not self.finished: written += self._buffer.write( '\r\n{0}\r\n'.format(self.boundary).encode('utf-8') ) return written
def _calculate_load_amount(self, read_size):
    """Return how many bytes must be loaded into the buffer before a
    read of ``read_size`` bytes can be satisfied.

    If the buffer already holds enough unread bytes, nothing more needs
    loading.

    :param int read_size: the number of bytes the consumer requests
    :returns: int -- bytes still needed; strictly non-negative
    """
    shortfall = read_size - super_len(self._buffer)
    return max(shortfall, 0)
def write_to(self, buffer, size):
    """Write the requested amount of bytes to the buffer provided.

    May exceed ``size`` on the first call because the headers are
    written eagerly in a single piece.

    :param CustomBytesIO buffer: destination buffer
    :param int size: bytes requested; -1 means "write everything"
    :returns: int -- number of bytes actually written
    """
    written = 0
    if self.headers_unread:
        self.headers_unread = False
        written += buffer.append(self.headers)
    while super_len(self.body) > 0:
        if size != -1 and written >= size:
            break  # quota reached
        # Unlimited reads pass -1 through; otherwise read the remainder.
        chunk_size = -1 if size == -1 else size - written
        written += buffer.append(self.body.read(chunk_size))
    return written
def __init__(self, headers, body):
    """Bind a rendered header block to its body stream.

    ``len`` is fixed at construction time: header length plus the
    body's total length.
    """
    self.headers = headers
    self.headers_unread = True
    self.body = body
    self.len = len(headers) + super_len(body)
def __init__(self, name, *, filename=None, headers=None, content_type=None,
             file=None, filepath=None, content=None, encoding='utf-8'):
    """Build a multipart form field from in-memory content or a file.

    :param name: field name (percent-quoted for the disposition header)
    :param filename: explicit filename; derived from ``filepath`` or the
        file object's ``name`` attribute when omitted
    :param headers: optional base headers dict
    :param content_type: explicit Content-Type; guessed from the
        filename when omitted
    :param file: a file object (wrapped in AsyncFile if needed)
    :param filepath: path to open in binary mode (closed by this field)
    :param content: str (encoded with ``encoding``) or bytes payload
    :raises ValueError: when no data source, or conflicting sources,
        are supplied
    """
    self.name = quote(name, safe='')
    self.headers = headers or {}
    self.content_length = None
    self._should_close_file = False

    # BUG FIX: validate data sources *before* opening anything so a bad
    # combination cannot leak an open file handle.
    if content is None and file is None and filepath is None:
        raise ValueError('Field data must be provided.')
    if content is not None and (file is not None or filepath is not None):
        raise ValueError("Can't provide both content and file.")

    if content is not None and isinstance(content, str):
        content = content.encode(encoding)
    self.content = content

    if filepath is not None:
        file = open(filepath, 'rb')
        self._should_close_file = True
    if file is not None:
        if not isinstance(file, AsyncFile):
            file = AsyncFile(file)
    self.file = file

    if content is not None:
        self.content_length = len(content)
        self._body_position = None
    else:
        # Measure the file once, remembering the cursor so the body can
        # be rewound later.
        with file.blocking() as f:
            self.content_length = super_len(f)
            self._body_position = safe_tell(f)

    if filename is None:
        if filepath is None and file is not None:
            # BUG FIX: file objects such as BytesIO have no ``name``
            # attribute; default to None instead of raising
            # AttributeError.
            filepath = getattr(file, 'name', None)
        if filepath is not None:
            filename = basename(filepath)
    if filename is not None:
        filename = quote(filename, safe='')
    self.filename = filename

    if content_type is None and filename is not None:
        content_type = mimetypes.guess_type(filename)[0]
    if content_type is not None:
        self.headers['Content-Type'] = content_type

    disposition = ['form-data', f'name="{self.name}"']
    if self.filename is not None:
        disposition.append(f'filename="{self.filename}"')
    self.headers['Content-Disposition'] = '; '.join(disposition)

    self.encoded_headers = encode_headers(self.headers)
def test_file(self, tmpdir, mode, warnings_num, recwarn):
    """A 4-byte file reports length 4 and emits the expected warnings."""
    path = tmpdir.join('test.txt')
    path.write('Test')
    with path.open(mode) as handle:
        assert super_len(handle) == 4
        assert len(recwarn) == warnings_num
def test_io_streams(self, stream, value):
    """Ensures that we properly deal with different kinds of IO streams."""
    empty = stream()
    filled = stream(value)
    assert super_len(empty) == 0
    assert super_len(filled) == 4
def test_super_len_with_fileno(self):
    """super_len of a real file equals the number of readable bytes."""
    with open(__file__, 'rb') as handle:
        reported = super_len(handle)
        contents = handle.read()
        assert reported == len(contents)
def test_super_len_with_tell(self):
    """super_len shrinks as the stream cursor advances."""
    stream = StringIO.StringIO('12345')
    assert super_len(stream) == 5
    stream.read(2)
    # Two characters consumed, three remain.
    assert super_len(stream) == 3
def prepare_body(self, data, files, json=None): """Prepares the given HTTP body data.""" # Check if file, fo, generator, iterator. # If not, run through normal process. # Nottin' on you. body = None content_type = None if not data and json is not None: # urllib3 requires a bytes-like body. Python 2's json.dumps # provides this natively, but Python 3 gives a Unicode string. content_type = 'application/json' body = dumps(json) if not isinstance(body, bytes): body = body.encode('utf-8') is_stream = all([ hasattr(data, '__iter__'), not isinstance(data, ((str, bytes), list, tuple, Mapping)) ]) try: length = super_len(data) except (TypeError, AttributeError, UnsupportedOperation): length = None if is_stream: body = data if getattr(body, 'tell', None) is not None: # Record the current file position before reading. # This will allow us to rewind a file in the event # of a redirect. try: self._body_position = body.tell() except (IOError, OSError): # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body self._body_position = object() if files: raise NotImplementedError( 'Streamed bodies and files are mutually exclusive.') if length: self.headers['Content-Length'] = str(length) else: self.headers['Transfer-Encoding'] = 'chunked' else: # Multi-part file uploads. if files: (body, content_type) = self._encode_files(files, data) else: if data: body = self._encode_params(data) if isinstance(data, (str, bytes)) or hasattr(data, 'read'): content_type = None else: content_type = 'application/x-www-form-urlencoded' self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. if content_type and ('content-type' not in self.headers): self.headers['Content-Type'] = content_type self.body = body
def test_super_len_with_no_matches(self):
    """Ensure that objects without any length methods default to 0"""
    featureless = object()
    assert super_len(featureless) == 0
def __len__(self):
    """Number of unread bytes remaining in the wrapped stream."""
    total = super_len(self.fd)
    consumed = self.fd.tell()
    return total - consumed
def _request(self, method, url_or_endpoint, **kwargs):
    """Issue an async WeChat API request via tornado.

    Resolves relative endpoints against ``API_BASE_URL``, ensures a
    valid ``access_token`` query parameter (refreshing asynchronously
    when expired), encodes the body (multipart for ``files``, JSON for
    dicts), and hands the response to ``_handle_result``.

    :raises WeChatClientException: when the HTTP request itself fails
    """
    # BUG FIX: AsyncHTTPClient() was instantiated twice in a row;
    # a single client is sufficient.
    http_client = AsyncHTTPClient()
    if not url_or_endpoint.startswith(('http://', 'https://')):
        api_base_url = kwargs.pop('api_base_url', self.API_BASE_URL)
        url = '{base}{endpoint}'.format(base=api_base_url,
                                        endpoint=url_or_endpoint)
    else:
        url = url_or_endpoint

    headers = {}
    params = kwargs.get('params', {})
    if 'access_token' not in params:
        # Tornado needs special handling here: refreshing the token is
        # itself an asynchronous operation.
        access_token = None
        if self.access_token:
            if not self.expires_at:
                # user provided access_token, just return it
                access_token = self.access_token
            else:
                timestamp = time.time()
                # Keep a 60-second safety margin before expiry.
                if self.expires_at - timestamp > 60:
                    access_token = self.access_token
        if not access_token:
            # Token missing or (nearly) expired: fetch a fresh one.
            yield self.fetch_access_token()
            access_token = self.access_token
        params['access_token'] = self.access_token
    params = urlencode(dict((k, to_binary(v)) for k, v in params.items()))
    url = '{0}?{1}'.format(url, params)

    data = kwargs.get('data', {})
    files = kwargs.get('files')
    if files:
        # Reuse requests' multipart encoder for file uploads.
        from requests.models import RequestEncodingMixin
        from requests.utils import super_len

        body, content_type = RequestEncodingMixin._encode_files(
            files, data)
        headers['Content-Type'] = content_type
        headers['Content-Length'] = super_len(body)
    else:
        if isinstance(data, dict):
            body = json.dumps(data, ensure_ascii=False)
            body = body.encode('utf-8')
        else:
            body = data

    result_processor = kwargs.pop('result_processor', None)
    timeout = kwargs.get('timeout', self.timeout)
    # GET requests must not carry a body.
    req = HTTPRequest(url=url,
                      method=method.upper(),
                      headers=headers,
                      body=body if method.upper() != "GET" else None,
                      request_timeout=timeout)
    res = yield http_client.fetch(req)
    if res.error is not None:
        raise WeChatClientException(errcode=None, errmsg=None,
                                    client=self, request=req,
                                    response=res)

    result = self._handle_result(res, method, url,
                                 result_processor, **kwargs)
    raise Return(result)
def test_string(self):
    """super_len of a plain string is its character count."""
    text = 'Test'
    assert super_len(text) == 4
def test_super_len_with__len__(self):
    """Objects defining __len__ are measured with it."""
    sequence = [1, 2, 3, 4]
    measured = super_len(sequence)
    assert measured == 4
def len(self):
    """Number of unread bytes remaining in the wrapped stream."""
    consumed = self.fd.tell()
    return super_len(self.fd) - consumed
def test_super_len_with_no__len__(self):
    """A ``len`` attribute is honored when __len__ is absent."""
    class LenAttrOnly(object):
        def __init__(self):
            self.len = 5

    assert super_len(LenAttrOnly()) == 5
def test_super_len_correctly_calculates_len_of_partially_read_file(self):
    """Ensure that we handle partially consumed file like objects."""
    stream = StringIO.StringIO()
    stream.write('foobarbogus')
    # The cursor sits at the end, so nothing is left to read.
    assert super_len(stream) == 0
def _request(self, method, url_or_endpoint, **kwargs):
    """Perform an async WeChat API request via tornado.

    Resolves relative endpoints against ``API_BASE_URL``, appends the
    access token to the query string, encodes the body (multipart for
    ``files``, JSON for dicts), and hands the response to
    ``_handle_result``.

    :raises WeChatClientException: when the HTTP request itself fails
    """
    http_client = AsyncHTTPClient()
    if not url_or_endpoint.startswith(('http://', 'https://')):
        api_base_url = kwargs.pop('api_base_url', self.API_BASE_URL)
        url = '{base}{endpoint}'.format(
            base=api_base_url,
            endpoint=url_or_endpoint
        )
    else:
        url = url_or_endpoint

    headers = {}
    params = kwargs.pop('params', {})
    # Every API call must carry an access_token query parameter.
    if 'access_token' not in params:
        params['access_token'] = self.access_token
    params = urlencode(dict((k, to_binary(v)) for k, v in params.items()))
    url = '{0}?{1}'.format(url, params)

    data = kwargs.get('data')
    files = kwargs.get('files')
    if files:
        # Reuse requests' multipart encoder for file uploads.
        from requests.models import RequestEncodingMixin
        from requests.utils import super_len
        body, content_type = RequestEncodingMixin._encode_files(
            files, data
        )
        headers['Content-Type'] = content_type
        headers['Content-Length'] = super_len(body)
    else:
        # Dicts are serialized as UTF-8 JSON; anything else is sent
        # through unchanged.
        if isinstance(data, dict):
            body = json.dumps(data, ensure_ascii=False)
            body = body.encode('utf-8')
        else:
            body = data

    result_processor = kwargs.pop('result_processor', None)
    timeout = kwargs.get('timeout', self.timeout)
    req = HTTPRequest(
        url=url,
        method=method.upper(),
        headers=headers,
        body=body,
        request_timeout=timeout
    )
    res = yield http_client.fetch(req)
    if res.error is not None:
        raise WeChatClientException(
            errcode=None,
            errmsg=None,
            client=self,
            request=req,
            response=res
        )

    result = self._handle_result(
        res, method, url, result_processor, **kwargs
    )
    raise Return(result)
def __len__(self):
    """Total size of this part: headers plus the full body length."""
    header_len = len(self.headers)
    return header_len + super_len(self.body)
def test_string(self):
    """A four-character string measures 4."""
    value = "Test"
    assert super_len(value) == 4
def test_super_len_with__len__(self):
    """Lists report their size through __len__."""
    items = [1, 2, 3, 4]
    assert super_len(items) == 4