def test_chunked_with_extra_headers(authorized_transport, secret_file):
    blob_name, data, headers = secret_file
    num_chunks = 4
    chunk_size = 12
    assert (num_chunks - 1) * chunk_size < len(data) < num_chunks * chunk_size
    # Create the actual download object.
    media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
    stream = io.BytesIO()
    download = resumable_requests.ChunkedDownload(
        media_url, chunk_size, stream, headers=headers
    )
    # Consume the resource in chunks.
    num_responses, last_response = consume_chunks(
        download, authorized_transport, len(data), data
    )
    # Make sure the combined chunks are the whole object.
    assert stream.getvalue() == data
    # Check that we have the right number of responses.
    assert num_responses == num_chunks
    # Make sure the last chunk isn't the same size.
    assert len(last_response.content) < chunk_size
    check_tombstoned(download, authorized_transport)
    # Attempt to consume the resource **without** the headers.
    stream_wo = io.BytesIO()
    download_wo = resumable_requests.ChunkedDownload(media_url, chunk_size, stream_wo)
    with pytest.raises(resumable_media.InvalidResponse) as exc_info:
        download_wo.consume_next_chunk(authorized_transport)

    assert stream_wo.tell() == 0
    check_error_response(exc_info, http_client.BAD_REQUEST, ENCRYPTED_ERR)
    assert download_wo.invalid

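# The ``secret_file`` fixture is not shown here. Given ``ENCRYPTED_ERR`` and the
# BAD_REQUEST check above, it is assumed to upload a blob with a
# customer-supplied encryption key (CSEK) and return the matching request
# headers. A minimal sketch of building such headers; the helper name and the
# locally generated key are assumptions, not part of the original suite:
import base64
import hashlib
import os


def _make_csek_headers(key=None):
    """Build CSEK request headers for a (hypothetical) 32-byte AES-256 key."""
    key = key or os.urandom(32)
    key_hash = hashlib.sha256(key).digest()
    return {
        u"x-goog-encryption-algorithm": u"AES256",
        u"x-goog-encryption-key": base64.b64encode(key).decode(u"utf-8"),
        u"x-goog-encryption-key-sha256": base64.b64encode(key_hash).decode(u"utf-8"),
    }
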
def test_chunked_download(add_files, authorized_transport):
    for info in ALL_FILES:
        actual_contents = _get_contents(info)
        blob_name = _get_blob_name(info)

        total_bytes = len(actual_contents)
        num_chunks, chunk_size = get_chunk_size(7, total_bytes)
        # Create the actual download object.
        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        stream = io.BytesIO()
        download = resumable_requests.ChunkedDownload(media_url, chunk_size, stream)
        # Consume the resource in chunks.
        num_responses, last_response = consume_chunks(
            download, authorized_transport, total_bytes, actual_contents
        )
        # Make sure the combined chunks are the whole object.
        assert stream.getvalue() == actual_contents
        # Check that we have the right number of responses.
        assert num_responses == num_chunks
        # Make sure the last chunk isn't the same size.
        assert total_bytes % chunk_size != 0
        assert len(last_response.content) < chunk_size
        check_tombstoned(download, authorized_transport)

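# ``get_chunk_size`` is assumed from the surrounding module. A sketch
# consistent with the assertions above (at least ``min_chunks`` responses and
# a strictly smaller final chunk, so the payload must not divide evenly). It
# assumes the payload is much larger than the chunk count, as system-test
# files typically are:
def get_chunk_size(min_chunks, total_bytes):
    """Pick a chunk count and size so the payload splits into uneven chunks."""
    # Bump the chunk count until it does NOT evenly divide the payload.
    num_chunks = min_chunks
    while total_bytes % num_chunks == 0:
        num_chunks += 1
    # Adding 1 to the quotient makes the last chunk strictly smaller, which
    # also guarantees ``total_bytes % chunk_size != 0``.
    chunk_size = total_bytes // num_chunks + 1
    return num_chunks, chunk_size
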
def test_chunked_download_partial(add_files, authorized_transport):
    for info in ALL_FILES:
        actual_contents = _get_contents(info)
        blob_name = _get_blob_name(info)

        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        for slice_ in info[u"slices"]:
            # Manually replace a missing start with 0.
            start = 0 if slice_.start is None else slice_.start
            # Chunked downloads don't support a negative index.
            if start < 0:
                continue

            # First determine how much content is in the slice and
            # use it to determine a chunking strategy.
            total_bytes = len(actual_contents)
            if slice_.stop is None:
                end_byte = total_bytes - 1
                end = None
            else:
                # Python slices DO NOT include the last index, though a byte
                # range **is** inclusive of both endpoints.
                end_byte = slice_.stop - 1
                end = end_byte

            num_chunks, chunk_size = get_chunk_size(7, end_byte - start + 1)
            # Create the actual download object.
            stream = io.BytesIO()
            download = resumable_requests.ChunkedDownload(
                media_url, chunk_size, stream, start=start, end=end
            )
            # Consume the resource in chunks.
            num_responses, last_response = consume_chunks(
                download, authorized_transport, total_bytes, actual_contents
            )
            # Make sure the combined chunks are the whole slice.
            assert stream.getvalue() == actual_contents[slice_]
            # Check that we have the right number of responses.
            assert num_responses == num_chunks
            # Make sure the last chunk isn't the same size.
            assert len(last_response.content) < chunk_size
            check_tombstoned(download, authorized_transport)

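# ``consume_chunks`` is also assumed from the surrounding module. A sketch of
# the expected behavior, driving ``consume_next_chunk`` until the download
# reports itself finished; the real helper may additionally verify each
# chunk's bytes against ``actual_contents``, which is omitted here:
def consume_chunks(download, authorized_transport, total_bytes, actual_contents):
    """Drive the download to completion; return (num_responses, last_response)."""
    num_responses = 0
    last_response = None
    while not download.finished:
        last_response = download.consume_next_chunk(authorized_transport)
        num_responses += 1
    # The server-reported object size should match the known size.
    assert download.total_bytes == total_bytes
    return num_responses, last_response
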
def __enter__(self):
    url: str = (
        f"https://www.googleapis.com/download/storage/v1/b/"
        f"{self._bucket.name}/o/{self._blob.name}?alt=media"
    )
    self._request = requests.ChunkedDownload(
        media_url=url, chunk_size=self._chunk_size, stream=self
    )
    return self

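# Because ``stream=self`` above, the surrounding class is assumed to act as
# its own writable file-like object. A minimal sketch of the matching pieces;
# the ``_buffer`` attribute and these method bodies are assumptions, not part
# of the original:
def write(self, data):
    # ChunkedDownload writes each consumed chunk of bytes here.
    self._buffer.extend(data)
    return len(data)


def __exit__(self, exc_type, exc_value, traceback):
    # Drop the download object; return False so exceptions propagate.
    self._request = None
    return False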